aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2012-10-02 23:26:15 -0400
committerDave Airlie <airlied@redhat.com>2012-10-02 23:26:15 -0400
commit268d28371cd326be4dfcd7eba5917bf4b9d30c8f (patch)
treefec4f9e98bde15301b5d5338038a9a31f7555456
parentdf86b5765a48d5f557489577652bd6df145b0e1b (diff)
parentb9f10852fcb1f09369d931dcbfbaad89ad1da4ad (diff)
Merge branch 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-next
This is a major rework of the nouveau driver core, to reflect more closely how the hw is used and to make it easier to implement newer features now that the GPUs are more clearly understood than when nouveau started. It also contains a few other bits: thermal patches nv41/44 pcie gart fixes i2c unregistering fixes. * 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6: (191 commits) drm/nv98/crypt: fix fuc build with latest envyas drm/nouveau/devinit: fixup various issues with subdev ctor/init ordering drm/nv41/vm: fix and enable use of "real" pciegart drm/nv44/vm: fix and enable use of "real" pciegart drm/nv04/dmaobj: fixup vm target handling in preparation for nv4x pcie drm/nouveau: store supported dma mask in vmmgr drm/nvc0/ibus: initial implementation of subdev drm/nouveau/therm: add support for fan-control modes drm/nouveau/hwmon: rename pwm0* to pmw1* to follow hwmon's rules drm/nouveau/therm: calculate the pwm divisor on nv50+ drm/nouveau/fan: rewrite the fan tachometer driver to get more precision, faster drm/nouveau/therm: move thermal-related functions to the therm subdev drm/nouveau/bios: parse the pwm divisor from the perf table drm/nouveau/therm: use the EXTDEV table to detect i2c monitoring devices drm/nouveau/therm: rework thermal table parsing drm/nouveau/gpio: expose the PWM/TOGGLE parameter found in the gpio vbios table drm/nouveau: fix pm initialization order drm/nouveau/bios: check that fixed tvdac gpio data is valid before using it drm/nouveau: log channel debug/error messages from client object rather than drm client drm/nouveau: have drm debugging macros build on top of core macros ... Conflicts: drivers/gpu/drm/nouveau/nouveau_dp.c
-rw-r--r--Documentation/vfio.txt2
-rw-r--r--MAINTAINERS9
-rw-r--r--Makefile2
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c2
-rw-r--r--arch/arm/mach-orion5x/common.c7
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/asm/barrier.h27
-rw-r--r--arch/tile/include/gxio/iorpc_trio.h24
-rw-r--r--arch/um/include/asm/processor-generic.h9
-rw-r--r--arch/um/include/shared/common-offsets.h10
-rw-r--r--arch/um/include/shared/user.h11
-rw-r--r--arch/um/kernel/exec.c25
-rw-r--r--arch/um/kernel/process.c8
-rw-r--r--arch/um/kernel/signal.c6
-rw-r--r--arch/um/kernel/syscall.c24
-rw-r--r--arch/um/scripts/Makefile.rules2
-rw-r--r--arch/x86/um/Kconfig1
-rw-r--r--arch/x86/um/shared/sysdep/kernel-offsets.h3
-rw-r--r--arch/x86/um/shared/sysdep/syscalls.h2
-rw-r--r--arch/x86/um/signal.c6
-rw-r--r--arch/x86/um/sys_call_table_32.c2
-rw-r--r--arch/x86/um/syscalls_32.c27
-rw-r--r--arch/x86/um/syscalls_64.c23
-rw-r--r--arch/x86/xen/setup.c4
-rw-r--r--drivers/block/nvme.c153
-rw-r--r--drivers/block/rbd.c7
-rw-r--r--drivers/edac/i3200_edac.c2
-rw-r--r--drivers/edac/i5000_edac.c4
-rw-r--r--drivers/edac/sb_edac.c7
-rw-r--r--drivers/gpio/gpio-lpc32xx.c5
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig36
-rw-r--r--drivers/gpu/drm/nouveau/Makefile225
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c103
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engctx.c236
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engine.c55
-rw-r--r--drivers/gpu/drm/nouveau/core/core/enum.c (renamed from drivers/gpu/drm/nouveau/nouveau_util.c)47
-rw-r--r--drivers/gpu/drm/nouveau/core/core/gpuobj.c318
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c223
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c (renamed from drivers/gpu/drm/nouveau/nouveau_mm.c)174
-rw-r--r--drivers/gpu/drm/nouveau/core/core/namedb.c203
-rw-r--r--drivers/gpu/drm/nouveau/core/core/object.c468
-rw-r--r--drivers/gpu/drm/nouveau/core/core/option.c131
-rw-r--r--drivers/gpu/drm/nouveau/core/core/parent.c139
-rw-r--r--drivers/gpu/drm/nouveau/core/core/printk.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/core/ramht.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/core/subdev.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc (renamed from drivers/gpu/drm/nouveau/nva3_copy.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h (renamed from drivers/gpu/drm/nouveau/nva3_copy.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h (renamed from drivers/gpu/drm/nouveau/nvc0_copy.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c222
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c265
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c156
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc (renamed from drivers/gpu/drm/nouveau/nv98_crypt.fuc)2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h (renamed from drivers/gpu/drm/nouveau/nv98_crypt.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c217
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c208
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c90
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c125
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c118
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/vga.c215
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c185
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c173
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c181
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c630
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h178
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c171
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c208
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c349
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c502
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c420
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c647
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c628
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctx.h (renamed from drivers/gpu/drm/nouveau/nouveau_grctx.h)26
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c (renamed from drivers/gpu/drm/nouveau/nv40_grctx.c)133
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c (renamed from drivers/gpu/drm/nouveau/nv50_grctx.c)561
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c3039
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c2788
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc (renamed from drivers/gpu/drm/nouveau/nvc0_grgpc.fuc)8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h (renamed from drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h)66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc451
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h530
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc (renamed from drivers/gpu/drm/nouveau/nvc0_grhub.fuc)8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h (renamed from drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h)89
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc780
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h857
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc (renamed from drivers/gpu/drm/nouveau/nvc0_graph.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc400
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c1387
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c1314
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c381
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv25.c167
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c134
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv30.c238
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv34.c168
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv35.c166
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c495
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c888
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c955
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h171
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c576
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/regs.h269
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c308
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c240
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c147
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c129
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c199
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c181
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nv84.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h118
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h42
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/debug.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h136
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engctx.h51
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engine.h57
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/enum.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/gpuobj.h71
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/handle.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/math.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/mm.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/namedb.h56
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h188
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/option.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h64
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/ramht.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/subdev.h118
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/bsp.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/copy.h49
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/crypt.h46
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h44
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h57
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h111
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h72
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/mpeg.h61
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/ppp.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/software.h60
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/vp.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bar.h55
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h90
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h25
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h14
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h77
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h46
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h59
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/device.h24
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/devinit.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h134
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h64
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h60
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ibus.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/instmem.h73
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h49
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mxm.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h58
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h53
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vga.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vm.h (renamed from drivers/gpu/drm/nouveau/nouveau_vm.h)87
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c263
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c215
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c479
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/bit.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/conn.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c76
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c100
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c121
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c129
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c2120
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/perf.c75
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pll.c417
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/therm.c177
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c359
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c (renamed from drivers/gpu/drm/nouveau/nouveau_ramht.h)56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c105
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c95
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c94
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pll.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c242
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c (renamed from drivers/gpu/drm/nouveau/nv50_calc.c)69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/base.c472
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv04.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c195
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c147
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c375
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c410
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c285
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/base.c (renamed from drivers/gpu/drm/nouveau/nv98_ppp.c)69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h98
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c189
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c159
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c (renamed from drivers/gpu/drm/nouveau/nouveau_i2c.h)65
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c87
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c130
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c130
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c120
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c136
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c148
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c178
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c498
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c245
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c271
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c169
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c194
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c212
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c407
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c230
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/base.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c198
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c138
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c172
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c93
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c49
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c80
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c73
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c75
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c290
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c193
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c233
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c234
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c116
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c163
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c157
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h73
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c81
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c249
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c (renamed from drivers/gpu/drm/nouveau/nouveau_vm.c)163
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c151
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c158
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c248
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c (renamed from drivers/gpu/drm/nouveau/nv50_vm.c)118
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c (renamed from drivers/gpu/drm/nouveau/nvc0_vm.c)123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c426
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c152
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c92
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c4566
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h178
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c437
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h99
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c234
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c400
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c397
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c219
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c196
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c259
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h94
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c56
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h51
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c278
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c693
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h144
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c513
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1655
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c224
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fifo.h32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c177
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.c400
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.h71
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpuobj.c808
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c435
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.h182
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c394
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioctl.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c131
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c744
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mxm.c723
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c163
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c65
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c462
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h186
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.c309
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c377
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_software.h56
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c1306
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c331
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c354
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.h25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.h49
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c99
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c53
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c142
-rw-r--r--drivers/gpu/drm/nouveau/nv04_cursor.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c148
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c132
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c129
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.h184
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fb.c55
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c70
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c67
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c506
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c1326
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c193
-rw-r--r--drivers/gpu/drm/nouveau/nv04_mc.c24
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c37
-rw-r--r--drivers/gpu/drm/nouveau/nv04_software.c147
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c84
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c104
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c103
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c138
-rw-r--r--drivers/gpu/drm/nouveau/nv10_gpio.c123
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c1189
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fifo.c177
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c98
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv_modes.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv20_fb.c148
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c836
-rw-r--r--drivers/gpu/drm/nouveau/nv30_fb.c116
-rw-r--r--drivers/gpu/drm/nouveau/nv31_mpeg.c346
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c163
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c210
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c467
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mc.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c182
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c118
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c89
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c551
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h32
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c268
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c296
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c127
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c294
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c155
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c868
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c428
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mc.c40
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mpeg.c241
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c247
-rw-r--r--drivers/gpu/drm/nouveau/nv50_software.c203
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c133
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c237
-rw-r--r--drivers/gpu/drm/nouveau/nv84_bsp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c205
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c127
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fifo.c250
-rw-r--r--drivers/gpu/drm/nouveau/nv84_vp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.c216
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.c203
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c274
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_copy.c243
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fb.c134
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c150
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c476
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c897
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h97
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c2878
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c223
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c178
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_software.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c160
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c504
-rw-r--r--drivers/gpu/drm/nouveau/nve0_fifo.c452
-rw-r--r--drivers/gpu/drm/nouveau/nve0_graph.c831
-rw-r--r--drivers/gpu/drm/nouveau/nve0_graph.h89
-rw-r--r--drivers/gpu/drm/nouveau/nve0_grctx.c2777
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/iommu/amd_iommu.c6
-rw-r--r--drivers/md/dm-mpath.c11
-rw-r--r--drivers/md/dm-table.c61
-rw-r--r--drivers/md/dm-thin.c135
-rw-r--r--drivers/md/dm-verity.c8
-rw-r--r--drivers/md/dm.c71
-rw-r--r--drivers/md/dm.h1
-rw-r--r--drivers/md/raid10.c8
-rw-r--r--drivers/md/raid5.c1
-rw-r--r--drivers/mtd/mtdchar.c48
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c4
-rw-r--r--drivers/net/phy/bcm87xx.c2
-rw-r--r--drivers/net/phy/micrel.c45
-rw-r--r--drivers/net/phy/smsc.c28
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/team/team.c44
-rw-r--r--drivers/net/usb/smsc75xx.c1
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c1
-rw-r--r--drivers/sh/pfc/pinctrl.c2
-rw-r--r--drivers/usb/core/devices.c2
-rw-r--r--drivers/usb/core/hcd.c6
-rw-r--r--drivers/usb/host/ohci-at91.c3
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c76
-rw-r--r--fs/dcache.c8
-rw-r--r--fs/lockd/svclock.c3
-rw-r--r--fs/namespace.c10
-rw-r--r--include/asm-generic/unistd.h4
-rw-r--r--include/linux/iommu.h42
-rw-r--r--include/linux/micrel_phy.h19
-rw-r--r--include/linux/nvme.h2
-rw-r--r--include/linux/security.h1
-rw-r--r--lib/flex_proportions.c2
-rw-r--r--mm/huge_memory.c1
-rw-r--r--net/batman-adv/bat_iv_ogm.c13
-rw-r--r--net/batman-adv/soft-interface.c7
-rw-r--r--net/bluetooth/hci_core.c2
-rw-r--r--net/bluetooth/l2cap_core.c2
-rw-r--r--net/bluetooth/mgmt.c16
-rw-r--r--net/ceph/messenger.c5
-rw-r--r--net/core/sock.c3
-rw-r--r--net/ipv4/inetpeer.c5
-rw-r--r--net/ipv4/raw.c14
-rw-r--r--net/ipv6/mip6.c20
-rw-r--r--net/ipv6/raw.c21
-rw-r--r--net/l2tp/l2tp_netlink.c12
-rw-r--r--net/netfilter/xt_limit.c8
-rw-r--r--net/wireless/reg.c12
-rwxr-xr-xscripts/checksyscalls.sh2
-rw-r--r--sound/soc/codecs/wm2000.c2
-rw-r--r--sound/usb/endpoint.c8
469 files changed, 54203 insertions, 38085 deletions
diff --git a/Documentation/vfio.txt b/Documentation/vfio.txt
index 0cb6685c8029..8eda3635a17d 100644
--- a/Documentation/vfio.txt
+++ b/Documentation/vfio.txt
@@ -133,7 +133,7 @@ character devices for this group:
133$ lspci -n -s 0000:06:0d.0 133$ lspci -n -s 0000:06:0d.0
13406:0d.0 0401: 1102:0002 (rev 08) 13406:0d.0 0401: 1102:0002 (rev 08)
135# echo 0000:06:0d.0 > /sys/bus/pci/devices/0000:06:0d.0/driver/unbind 135# echo 0000:06:0d.0 > /sys/bus/pci/devices/0000:06:0d.0/driver/unbind
136# echo 1102 0002 > /sys/bus/pci/drivers/vfio/new_id 136# echo 1102 0002 > /sys/bus/pci/drivers/vfio-pci/new_id
137 137
138Now we need to look at what other devices are in the group to free 138Now we need to look at what other devices are in the group to free
139it for use by VFIO: 139it for use by VFIO:
diff --git a/MAINTAINERS b/MAINTAINERS
index b17587d9412f..9a6c4da3b2ff 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3552,11 +3552,12 @@ K: \b(ABS|SYN)_MT_
3552 3552
3553INTEL C600 SERIES SAS CONTROLLER DRIVER 3553INTEL C600 SERIES SAS CONTROLLER DRIVER
3554M: Intel SCU Linux support <intel-linux-scu@intel.com> 3554M: Intel SCU Linux support <intel-linux-scu@intel.com>
3555M: Lukasz Dorau <lukasz.dorau@intel.com>
3556M: Maciej Patelczyk <maciej.patelczyk@intel.com>
3555M: Dave Jiang <dave.jiang@intel.com> 3557M: Dave Jiang <dave.jiang@intel.com>
3556M: Ed Nadolski <edmund.nadolski@intel.com>
3557L: linux-scsi@vger.kernel.org 3558L: linux-scsi@vger.kernel.org
3558T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git 3559T: git git://git.code.sf.net/p/intel-sas/isci
3559S: Maintained 3560S: Supported
3560F: drivers/scsi/isci/ 3561F: drivers/scsi/isci/
3561F: firmware/isci/ 3562F: firmware/isci/
3562 3563
@@ -5544,6 +5545,8 @@ F: Documentation/devicetree/bindings/pwm/
5544F: include/linux/pwm.h 5545F: include/linux/pwm.h
5545F: include/linux/of_pwm.h 5546F: include/linux/of_pwm.h
5546F: drivers/pwm/ 5547F: drivers/pwm/
5548F: drivers/video/backlight/pwm_bl.c
5549F: include/linux/pwm_backlight.h
5547 5550
5548PXA2xx/PXA3xx SUPPORT 5551PXA2xx/PXA3xx SUPPORT
5549M: Eric Miao <eric.y.miao@gmail.com> 5552M: Eric Miao <eric.y.miao@gmail.com>
diff --git a/Makefile b/Makefile
index a3c11d589681..bb9fff26f078 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc7 4EXTRAVERSION =
5NAME = Terrified Chipmunk 5NAME = Terrified Chipmunk
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 8dabfe81d07c..ff886e01a0b0 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -261,7 +261,7 @@ static void __init apx4devkit_init(void)
261 enable_clk_enet_out(); 261 enable_clk_enet_out();
262 262
263 if (IS_BUILTIN(CONFIG_PHYLIB)) 263 if (IS_BUILTIN(CONFIG_PHYLIB))
264 phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK, 264 phy_register_fixup_for_uid(PHY_ID_KSZ8051, MICREL_PHY_ID_MASK,
265 apx4devkit_phy_fixup); 265 apx4devkit_phy_fixup);
266 266
267 mxsfb_pdata.mode_list = apx4devkit_video_modes; 267 mxsfb_pdata.mode_list = apx4devkit_video_modes;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 410291c67666..a6cd14ab1e4e 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -204,6 +204,13 @@ void __init orion5x_wdt_init(void)
204void __init orion5x_init_early(void) 204void __init orion5x_init_early(void)
205{ 205{
206 orion_time_set_base(TIMER_VIRT_BASE); 206 orion_time_set_base(TIMER_VIRT_BASE);
207
208 /*
209 * Some Orion5x devices allocate their coherent buffers from atomic
210 * context. Increase size of atomic coherent pool to make sure such
211 * the allocations won't fail.
212 */
213 init_dma_coherent_pool_size(SZ_1M);
207} 214}
208 215
209int orion5x_tclk; 216int orion5x_tclk;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e59c4ab71bcb..13f555d62491 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -346,6 +346,8 @@ static int __init atomic_pool_init(void)
346 (unsigned)pool->size / 1024); 346 (unsigned)pool->size / 1024);
347 return 0; 347 return 0;
348 } 348 }
349
350 kfree(pages);
349no_pages: 351no_pages:
350 kfree(bitmap); 352 kfree(bitmap);
351no_bitmap: 353no_bitmap:
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 3af601e31e66..f08e89183cda 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm
2 2
3generic-y += atomic.h 3generic-y += atomic.h
4generic-y += auxvec.h 4generic-y += auxvec.h
5generic-y += barrier.h
5generic-y += bitsperlong.h 6generic-y += bitsperlong.h
6generic-y += bugs.h 7generic-y += bugs.h
7generic-y += cputime.h 8generic-y += cputime.h
diff --git a/arch/c6x/include/asm/barrier.h b/arch/c6x/include/asm/barrier.h
deleted file mode 100644
index 538240e85909..000000000000
--- a/arch/c6x/include/asm/barrier.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Port on Texas Instruments TMS320C6x architecture
3 *
4 * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
5 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef _ASM_C6X_BARRIER_H
12#define _ASM_C6X_BARRIER_H
13
14#define nop() asm("NOP\n");
15
16#define mb() barrier()
17#define rmb() barrier()
18#define wmb() barrier()
19#define set_mb(var, value) do { var = value; mb(); } while (0)
20#define set_wmb(var, value) do { var = value; wmb(); } while (0)
21
22#define smp_mb() barrier()
23#define smp_rmb() barrier()
24#define smp_wmb() barrier()
25#define smp_read_barrier_depends() do { } while (0)
26
27#endif /* _ASM_C6X_BARRIER_H */
diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h
index 15fb77992083..58105c31228b 100644
--- a/arch/tile/include/gxio/iorpc_trio.h
+++ b/arch/tile/include/gxio/iorpc_trio.h
@@ -25,21 +25,23 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <asm/pgtable.h> 26#include <asm/pgtable.h>
27 27
28#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400) 28#define GXIO_TRIO_OP_DEALLOC_ASID IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400)
29#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1401)
29 30
30#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1402) 31#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404)
31 32
32#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e) 33#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412)
33#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140f)
34 34
35#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1417) 35#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414)
36#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1418)
37#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1419)
38#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x141a)
39 36
40#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141c) 37#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e)
41#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141d) 38#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141f)
42#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e) 39#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1420)
40#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1421)
41
42#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1423)
43#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1424)
44#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1425)
43#define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) 45#define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
44#define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) 46#define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
45 47
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 69f1c57a8d0d..33a6a2423bd2 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -20,14 +20,6 @@ struct mm_struct;
20 20
21struct thread_struct { 21struct thread_struct {
22 struct task_struct *saved_task; 22 struct task_struct *saved_task;
23 /*
24 * This flag is set to 1 before calling do_fork (and analyzed in
25 * copy_thread) to mark that we are begin called from userspace (fork /
26 * vfork / clone), and reset to 0 after. It is left to 0 when called
27 * from kernelspace (i.e. kernel_thread() or fork_idle(),
28 * as of 2.6.11).
29 */
30 int forking;
31 struct pt_regs regs; 23 struct pt_regs regs;
32 int singlestep_syscall; 24 int singlestep_syscall;
33 void *fault_addr; 25 void *fault_addr;
@@ -58,7 +50,6 @@ struct thread_struct {
58 50
59#define INIT_THREAD \ 51#define INIT_THREAD \
60{ \ 52{ \
61 .forking = 0, \
62 .regs = EMPTY_REGS, \ 53 .regs = EMPTY_REGS, \
63 .fault_addr = NULL, \ 54 .fault_addr = NULL, \
64 .prev_sched = NULL, \ 55 .prev_sched = NULL, \
diff --git a/arch/um/include/shared/common-offsets.h b/arch/um/include/shared/common-offsets.h
index 40db8f71deae..2df313b6a586 100644
--- a/arch/um/include/shared/common-offsets.h
+++ b/arch/um/include/shared/common-offsets.h
@@ -7,16 +7,6 @@ DEFINE(UM_KERN_PAGE_MASK, PAGE_MASK);
7DEFINE(UM_KERN_PAGE_SHIFT, PAGE_SHIFT); 7DEFINE(UM_KERN_PAGE_SHIFT, PAGE_SHIFT);
8DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC); 8DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC);
9 9
10DEFINE_STR(UM_KERN_EMERG, KERN_EMERG);
11DEFINE_STR(UM_KERN_ALERT, KERN_ALERT);
12DEFINE_STR(UM_KERN_CRIT, KERN_CRIT);
13DEFINE_STR(UM_KERN_ERR, KERN_ERR);
14DEFINE_STR(UM_KERN_WARNING, KERN_WARNING);
15DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE);
16DEFINE_STR(UM_KERN_INFO, KERN_INFO);
17DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG);
18DEFINE_STR(UM_KERN_CONT, KERN_CONT);
19
20DEFINE(UM_ELF_CLASS, ELF_CLASS); 10DEFINE(UM_ELF_CLASS, ELF_CLASS);
21DEFINE(UM_ELFCLASS32, ELFCLASS32); 11DEFINE(UM_ELFCLASS32, ELFCLASS32);
22DEFINE(UM_ELFCLASS64, ELFCLASS64); 12DEFINE(UM_ELFCLASS64, ELFCLASS64);
diff --git a/arch/um/include/shared/user.h b/arch/um/include/shared/user.h
index 4fa82c055aab..cef068563336 100644
--- a/arch/um/include/shared/user.h
+++ b/arch/um/include/shared/user.h
@@ -26,6 +26,17 @@
26extern void panic(const char *fmt, ...) 26extern void panic(const char *fmt, ...)
27 __attribute__ ((format (printf, 1, 2))); 27 __attribute__ ((format (printf, 1, 2)));
28 28
29/* Requires preincluding include/linux/kern_levels.h */
30#define UM_KERN_EMERG KERN_EMERG
31#define UM_KERN_ALERT KERN_ALERT
32#define UM_KERN_CRIT KERN_CRIT
33#define UM_KERN_ERR KERN_ERR
34#define UM_KERN_WARNING KERN_WARNING
35#define UM_KERN_NOTICE KERN_NOTICE
36#define UM_KERN_INFO KERN_INFO
37#define UM_KERN_DEBUG KERN_DEBUG
38#define UM_KERN_CONT KERN_CONT
39
29#ifdef UML_CONFIG_PRINTK 40#ifdef UML_CONFIG_PRINTK
30extern int printk(const char *fmt, ...) 41extern int printk(const char *fmt, ...)
31 __attribute__ ((format (printf, 1, 2))); 42 __attribute__ ((format (printf, 1, 2)));
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 6cade9366364..8c82786da823 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -39,34 +39,21 @@ void flush_thread(void)
39 39
40void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) 40void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
41{ 41{
42 get_safe_registers(regs->regs.gp, regs->regs.fp);
42 PT_REGS_IP(regs) = eip; 43 PT_REGS_IP(regs) = eip;
43 PT_REGS_SP(regs) = esp; 44 PT_REGS_SP(regs) = esp;
44} 45 current->ptrace &= ~PT_DTRACE;
45EXPORT_SYMBOL(start_thread);
46
47static long execve1(const char *file,
48 const char __user *const __user *argv,
49 const char __user *const __user *env)
50{
51 long error;
52
53 error = do_execve(file, argv, env, &current->thread.regs);
54 if (error == 0) {
55 task_lock(current);
56 current->ptrace &= ~PT_DTRACE;
57#ifdef SUBARCH_EXECVE1 46#ifdef SUBARCH_EXECVE1
58 SUBARCH_EXECVE1(&current->thread.regs.regs); 47 SUBARCH_EXECVE1(regs->regs);
59#endif 48#endif
60 task_unlock(current);
61 }
62 return error;
63} 49}
50EXPORT_SYMBOL(start_thread);
64 51
65long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env) 52long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
66{ 53{
67 long err; 54 long err;
68 55
69 err = execve1(file, argv, env); 56 err = do_execve(file, argv, env, &current->thread.regs);
70 if (!err) 57 if (!err)
71 UML_LONGJMP(current->thread.exec_buf, 1); 58 UML_LONGJMP(current->thread.exec_buf, 1);
72 return err; 59 return err;
@@ -81,7 +68,7 @@ long sys_execve(const char __user *file, const char __user *const __user *argv,
81 filename = getname(file); 68 filename = getname(file);
82 error = PTR_ERR(filename); 69 error = PTR_ERR(filename);
83 if (IS_ERR(filename)) goto out; 70 if (IS_ERR(filename)) goto out;
84 error = execve1(filename, argv, env); 71 error = do_execve(filename, argv, env, &current->thread.regs);
85 putname(filename); 72 putname(filename);
86 out: 73 out:
87 return error; 74 return error;
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 57fc7028714a..c5f5afa50745 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -181,11 +181,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
181 struct pt_regs *regs) 181 struct pt_regs *regs)
182{ 182{
183 void (*handler)(void); 183 void (*handler)(void);
184 int kthread = current->flags & PF_KTHREAD;
184 int ret = 0; 185 int ret = 0;
185 186
186 p->thread = (struct thread_struct) INIT_THREAD; 187 p->thread = (struct thread_struct) INIT_THREAD;
187 188
188 if (current->thread.forking) { 189 if (!kthread) {
189 memcpy(&p->thread.regs.regs, &regs->regs, 190 memcpy(&p->thread.regs.regs, &regs->regs,
190 sizeof(p->thread.regs.regs)); 191 sizeof(p->thread.regs.regs));
191 PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0); 192 PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
@@ -195,8 +196,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
195 handler = fork_handler; 196 handler = fork_handler;
196 197
197 arch_copy_thread(&current->thread.arch, &p->thread.arch); 198 arch_copy_thread(&current->thread.arch, &p->thread.arch);
198 } 199 } else {
199 else {
200 get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); 200 get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
201 p->thread.request.u.thread = current->thread.request.u.thread; 201 p->thread.request.u.thread = current->thread.request.u.thread;
202 handler = new_thread_handler; 202 handler = new_thread_handler;
@@ -204,7 +204,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
204 204
205 new_thread(task_stack_page(p), &p->thread.switch_buf, handler); 205 new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
206 206
207 if (current->thread.forking) { 207 if (!kthread) {
208 clear_flushed_tls(p); 208 clear_flushed_tls(p);
209 209
210 /* 210 /*
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 7362d58efc29..cc9c2350e417 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -22,9 +22,13 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
22 struct k_sigaction *ka, siginfo_t *info) 22 struct k_sigaction *ka, siginfo_t *info)
23{ 23{
24 sigset_t *oldset = sigmask_to_save(); 24 sigset_t *oldset = sigmask_to_save();
25 int singlestep = 0;
25 unsigned long sp; 26 unsigned long sp;
26 int err; 27 int err;
27 28
29 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
30 singlestep = 1;
31
28 /* Did we come from a system call? */ 32 /* Did we come from a system call? */
29 if (PT_REGS_SYSCALL_NR(regs) >= 0) { 33 if (PT_REGS_SYSCALL_NR(regs) >= 0) {
30 /* If so, check system call restarting.. */ 34 /* If so, check system call restarting.. */
@@ -61,7 +65,7 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
61 if (err) 65 if (err)
62 force_sigsegv(signr, current); 66 force_sigsegv(signr, current);
63 else 67 else
64 signal_delivered(signr, info, ka, regs, 0); 68 signal_delivered(signr, info, ka, regs, singlestep);
65} 69}
66 70
67static int kern_do_signal(struct pt_regs *regs) 71static int kern_do_signal(struct pt_regs *regs)
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index f958cb876ee3..a4c6d8eee74c 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -17,25 +17,25 @@
17 17
18long sys_fork(void) 18long sys_fork(void)
19{ 19{
20 long ret; 20 return do_fork(SIGCHLD, UPT_SP(&current->thread.regs.regs),
21
22 current->thread.forking = 1;
23 ret = do_fork(SIGCHLD, UPT_SP(&current->thread.regs.regs),
24 &current->thread.regs, 0, NULL, NULL); 21 &current->thread.regs, 0, NULL, NULL);
25 current->thread.forking = 0;
26 return ret;
27} 22}
28 23
29long sys_vfork(void) 24long sys_vfork(void)
30{ 25{
31 long ret; 26 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
32
33 current->thread.forking = 1;
34 ret = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
35 UPT_SP(&current->thread.regs.regs), 27 UPT_SP(&current->thread.regs.regs),
36 &current->thread.regs, 0, NULL, NULL); 28 &current->thread.regs, 0, NULL, NULL);
37 current->thread.forking = 0; 29}
38 return ret; 30
31long sys_clone(unsigned long clone_flags, unsigned long newsp,
32 void __user *parent_tid, void __user *child_tid)
33{
34 if (!newsp)
35 newsp = UPT_SP(&current->thread.regs.regs);
36
37 return do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
38 child_tid);
39} 39}
40 40
41long old_mmap(unsigned long addr, unsigned long len, 41long old_mmap(unsigned long addr, unsigned long len,
diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules
index d50270d26b42..15889df9b466 100644
--- a/arch/um/scripts/Makefile.rules
+++ b/arch/um/scripts/Makefile.rules
@@ -8,7 +8,7 @@ USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m) $(USER_SINGLE_OBJS))
8USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file)) 8USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
9 9
10$(USER_OBJS:.o=.%): \ 10$(USER_OBJS:.o=.%): \
11 c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include user.h $(CFLAGS_$(basetarget).o) 11 c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include $(srctree)/include/linux/kern_levels.h -include user.h $(CFLAGS_$(basetarget).o)
12 12
13# These are like USER_OBJS but filter USER_CFLAGS through unprofile instead of 13# These are like USER_OBJS but filter USER_CFLAGS through unprofile instead of
14# using it directly. 14# using it directly.
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 9926e11a772d..aeaff8bef2f1 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -21,6 +21,7 @@ config 64BIT
21config X86_32 21config X86_32
22 def_bool !64BIT 22 def_bool !64BIT
23 select HAVE_AOUT 23 select HAVE_AOUT
24 select ARCH_WANT_IPC_PARSE_VERSION
24 25
25config X86_64 26config X86_64
26 def_bool 64BIT 27 def_bool 64BIT
diff --git a/arch/x86/um/shared/sysdep/kernel-offsets.h b/arch/x86/um/shared/sysdep/kernel-offsets.h
index 5868526b5eef..46a9df99f3c5 100644
--- a/arch/x86/um/shared/sysdep/kernel-offsets.h
+++ b/arch/x86/um/shared/sysdep/kernel-offsets.h
@@ -7,9 +7,6 @@
7#define DEFINE(sym, val) \ 7#define DEFINE(sym, val) \
8 asm volatile("\n->" #sym " %0 " #val : : "i" (val)) 8 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
9 9
10#define STR(x) #x
11#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : )
12
13#define BLANK() asm volatile("\n->" : : ) 10#define BLANK() asm volatile("\n->" : : )
14 11
15#define OFFSET(sym, str, mem) \ 12#define OFFSET(sym, str, mem) \
diff --git a/arch/x86/um/shared/sysdep/syscalls.h b/arch/x86/um/shared/sysdep/syscalls.h
index bd9a89b67e41..ca255a805ed9 100644
--- a/arch/x86/um/shared/sysdep/syscalls.h
+++ b/arch/x86/um/shared/sysdep/syscalls.h
@@ -1,3 +1,5 @@
1extern long sys_clone(unsigned long clone_flags, unsigned long newsp,
2 void __user *parent_tid, void __user *child_tid);
1#ifdef __i386__ 3#ifdef __i386__
2#include "syscalls_32.h" 4#include "syscalls_32.h"
3#else 5#else
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index a508cea13503..ba7363ecf896 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -416,9 +416,6 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
416 PT_REGS_AX(regs) = (unsigned long) sig; 416 PT_REGS_AX(regs) = (unsigned long) sig;
417 PT_REGS_DX(regs) = (unsigned long) 0; 417 PT_REGS_DX(regs) = (unsigned long) 0;
418 PT_REGS_CX(regs) = (unsigned long) 0; 418 PT_REGS_CX(regs) = (unsigned long) 0;
419
420 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
421 ptrace_notify(SIGTRAP);
422 return 0; 419 return 0;
423} 420}
424 421
@@ -466,9 +463,6 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
466 PT_REGS_AX(regs) = (unsigned long) sig; 463 PT_REGS_AX(regs) = (unsigned long) sig;
467 PT_REGS_DX(regs) = (unsigned long) &frame->info; 464 PT_REGS_DX(regs) = (unsigned long) &frame->info;
468 PT_REGS_CX(regs) = (unsigned long) &frame->uc; 465 PT_REGS_CX(regs) = (unsigned long) &frame->uc;
469
470 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
471 ptrace_notify(SIGTRAP);
472 return 0; 466 return 0;
473} 467}
474 468
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 68d1dc91b37b..b5408cecac6c 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -28,7 +28,7 @@
28#define ptregs_execve sys_execve 28#define ptregs_execve sys_execve
29#define ptregs_iopl sys_iopl 29#define ptregs_iopl sys_iopl
30#define ptregs_vm86old sys_vm86old 30#define ptregs_vm86old sys_vm86old
31#define ptregs_clone sys_clone 31#define ptregs_clone i386_clone
32#define ptregs_vm86 sys_vm86 32#define ptregs_vm86 sys_vm86
33#define ptregs_sigaltstack sys_sigaltstack 33#define ptregs_sigaltstack sys_sigaltstack
34#define ptregs_vfork sys_vfork 34#define ptregs_vfork sys_vfork
diff --git a/arch/x86/um/syscalls_32.c b/arch/x86/um/syscalls_32.c
index b853e8600b9d..db444c7218fe 100644
--- a/arch/x86/um/syscalls_32.c
+++ b/arch/x86/um/syscalls_32.c
@@ -3,37 +3,24 @@
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include "linux/sched.h" 6#include <linux/syscalls.h>
7#include "linux/shm.h" 7#include <sysdep/syscalls.h>
8#include "linux/ipc.h"
9#include "linux/syscalls.h"
10#include "asm/mman.h"
11#include "asm/uaccess.h"
12#include "asm/unistd.h"
13 8
14/* 9/*
15 * The prototype on i386 is: 10 * The prototype on i386 is:
16 * 11 *
17 * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr) 12 * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls
18 * 13 *
19 * and the "newtls" arg. on i386 is read by copy_thread directly from the 14 * and the "newtls" arg. on i386 is read by copy_thread directly from the
20 * register saved on the stack. 15 * register saved on the stack.
21 */ 16 */
22long sys_clone(unsigned long clone_flags, unsigned long newsp, 17long i386_clone(unsigned long clone_flags, unsigned long newsp,
23 int __user *parent_tid, void *newtls, int __user *child_tid) 18 int __user *parent_tid, void *newtls, int __user *child_tid)
24{ 19{
25 long ret; 20 return sys_clone(clone_flags, newsp, parent_tid, child_tid);
26
27 if (!newsp)
28 newsp = UPT_SP(&current->thread.regs.regs);
29
30 current->thread.forking = 1;
31 ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
32 child_tid);
33 current->thread.forking = 0;
34 return ret;
35} 21}
36 22
23
37long sys_sigaction(int sig, const struct old_sigaction __user *act, 24long sys_sigaction(int sig, const struct old_sigaction __user *act,
38 struct old_sigaction __user *oact) 25 struct old_sigaction __user *oact)
39{ 26{
diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c
index f3d82bb6e15a..adb08eb5c22a 100644
--- a/arch/x86/um/syscalls_64.c
+++ b/arch/x86/um/syscalls_64.c
@@ -5,12 +5,9 @@
5 * Licensed under the GPL 5 * Licensed under the GPL
6 */ 6 */
7 7
8#include "linux/linkage.h" 8#include <linux/sched.h>
9#include "linux/personality.h" 9#include <asm/prctl.h> /* XXX This should get the constants from libc */
10#include "linux/utsname.h" 10#include <os.h>
11#include "asm/prctl.h" /* XXX This should get the constants from libc */
12#include "asm/uaccess.h"
13#include "os.h"
14 11
15long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) 12long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
16{ 13{
@@ -79,20 +76,6 @@ long sys_arch_prctl(int code, unsigned long addr)
79 return arch_prctl(current, code, (unsigned long __user *) addr); 76 return arch_prctl(current, code, (unsigned long __user *) addr);
80} 77}
81 78
82long sys_clone(unsigned long clone_flags, unsigned long newsp,
83 void __user *parent_tid, void __user *child_tid)
84{
85 long ret;
86
87 if (!newsp)
88 newsp = UPT_SP(&current->thread.regs.regs);
89 current->thread.forking = 1;
90 ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
91 child_tid);
92 current->thread.forking = 0;
93 return ret;
94}
95
96void arch_switch_to(struct task_struct *to) 79void arch_switch_to(struct task_struct *to)
97{ 80{
98 if ((to->thread.arch.fs == 0) || (to->mm == NULL)) 81 if ((to->thread.arch.fs == 0) || (to->mm == NULL))
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index d11ca11d14fc..e2d62d697b5d 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -17,6 +17,7 @@
17#include <asm/e820.h> 17#include <asm/e820.h>
18#include <asm/setup.h> 18#include <asm/setup.h>
19#include <asm/acpi.h> 19#include <asm/acpi.h>
20#include <asm/numa.h>
20#include <asm/xen/hypervisor.h> 21#include <asm/xen/hypervisor.h>
21#include <asm/xen/hypercall.h> 22#include <asm/xen/hypercall.h>
22 23
@@ -544,4 +545,7 @@ void __init xen_arch_setup(void)
544 disable_cpufreq(); 545 disable_cpufreq();
545 WARN_ON(set_pm_idle_to_default()); 546 WARN_ON(set_pm_idle_to_default());
546 fiddle_vdso(); 547 fiddle_vdso();
548#ifdef CONFIG_NUMA
549 numa_off = 1;
550#endif
547} 551}
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 38a2d0631882..ad16c68c8645 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -79,6 +79,7 @@ struct nvme_dev {
79 char serial[20]; 79 char serial[20];
80 char model[40]; 80 char model[40];
81 char firmware_rev[8]; 81 char firmware_rev[8];
82 u32 max_hw_sectors;
82}; 83};
83 84
84/* 85/*
@@ -835,15 +836,15 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
835} 836}
836 837
837static int nvme_get_features(struct nvme_dev *dev, unsigned fid, 838static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
838 unsigned dword11, dma_addr_t dma_addr) 839 unsigned nsid, dma_addr_t dma_addr)
839{ 840{
840 struct nvme_command c; 841 struct nvme_command c;
841 842
842 memset(&c, 0, sizeof(c)); 843 memset(&c, 0, sizeof(c));
843 c.features.opcode = nvme_admin_get_features; 844 c.features.opcode = nvme_admin_get_features;
845 c.features.nsid = cpu_to_le32(nsid);
844 c.features.prp1 = cpu_to_le64(dma_addr); 846 c.features.prp1 = cpu_to_le64(dma_addr);
845 c.features.fid = cpu_to_le32(fid); 847 c.features.fid = cpu_to_le32(fid);
846 c.features.dword11 = cpu_to_le32(dword11);
847 848
848 return nvme_submit_admin_cmd(dev, &c, NULL); 849 return nvme_submit_admin_cmd(dev, &c, NULL);
849} 850}
@@ -862,11 +863,51 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
862 return nvme_submit_admin_cmd(dev, &c, result); 863 return nvme_submit_admin_cmd(dev, &c, result);
863} 864}
864 865
866/**
867 * nvme_cancel_ios - Cancel outstanding I/Os
868 * @queue: The queue to cancel I/Os on
869 * @timeout: True to only cancel I/Os which have timed out
870 */
871static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
872{
873 int depth = nvmeq->q_depth - 1;
874 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
875 unsigned long now = jiffies;
876 int cmdid;
877
878 for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
879 void *ctx;
880 nvme_completion_fn fn;
881 static struct nvme_completion cqe = {
882 .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
883 };
884
885 if (timeout && !time_after(now, info[cmdid].timeout))
886 continue;
887 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
888 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
889 fn(nvmeq->dev, ctx, &cqe);
890 }
891}
892
893static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
894{
895 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
896 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
897 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
898 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
899 kfree(nvmeq);
900}
901
865static void nvme_free_queue(struct nvme_dev *dev, int qid) 902static void nvme_free_queue(struct nvme_dev *dev, int qid)
866{ 903{
867 struct nvme_queue *nvmeq = dev->queues[qid]; 904 struct nvme_queue *nvmeq = dev->queues[qid];
868 int vector = dev->entry[nvmeq->cq_vector].vector; 905 int vector = dev->entry[nvmeq->cq_vector].vector;
869 906
907 spin_lock_irq(&nvmeq->q_lock);
908 nvme_cancel_ios(nvmeq, false);
909 spin_unlock_irq(&nvmeq->q_lock);
910
870 irq_set_affinity_hint(vector, NULL); 911 irq_set_affinity_hint(vector, NULL);
871 free_irq(vector, nvmeq); 912 free_irq(vector, nvmeq);
872 913
@@ -876,18 +917,15 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
876 adapter_delete_cq(dev, qid); 917 adapter_delete_cq(dev, qid);
877 } 918 }
878 919
879 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 920 nvme_free_queue_mem(nvmeq);
880 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
881 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
882 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
883 kfree(nvmeq);
884} 921}
885 922
886static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 923static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
887 int depth, int vector) 924 int depth, int vector)
888{ 925{
889 struct device *dmadev = &dev->pci_dev->dev; 926 struct device *dmadev = &dev->pci_dev->dev;
890 unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info)); 927 unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
928 sizeof(struct nvme_cmd_info));
891 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); 929 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
892 if (!nvmeq) 930 if (!nvmeq)
893 return NULL; 931 return NULL;
@@ -975,7 +1013,7 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
975 1013
976static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev) 1014static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
977{ 1015{
978 int result; 1016 int result = 0;
979 u32 aqa; 1017 u32 aqa;
980 u64 cap; 1018 u64 cap;
981 unsigned long timeout; 1019 unsigned long timeout;
@@ -1005,17 +1043,22 @@ static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
1005 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; 1043 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
1006 dev->db_stride = NVME_CAP_STRIDE(cap); 1044 dev->db_stride = NVME_CAP_STRIDE(cap);
1007 1045
1008 while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { 1046 while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
1009 msleep(100); 1047 msleep(100);
1010 if (fatal_signal_pending(current)) 1048 if (fatal_signal_pending(current))
1011 return -EINTR; 1049 result = -EINTR;
1012 if (time_after(jiffies, timeout)) { 1050 if (time_after(jiffies, timeout)) {
1013 dev_err(&dev->pci_dev->dev, 1051 dev_err(&dev->pci_dev->dev,
1014 "Device not ready; aborting initialisation\n"); 1052 "Device not ready; aborting initialisation\n");
1015 return -ENODEV; 1053 result = -ENODEV;
1016 } 1054 }
1017 } 1055 }
1018 1056
1057 if (result) {
1058 nvme_free_queue_mem(nvmeq);
1059 return result;
1060 }
1061
1019 result = queue_request_irq(dev, nvmeq, "nvme admin"); 1062 result = queue_request_irq(dev, nvmeq, "nvme admin");
1020 dev->queues[0] = nvmeq; 1063 dev->queues[0] = nvmeq;
1021 return result; 1064 return result;
@@ -1037,6 +1080,8 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1037 offset = offset_in_page(addr); 1080 offset = offset_in_page(addr);
1038 count = DIV_ROUND_UP(offset + length, PAGE_SIZE); 1081 count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1039 pages = kcalloc(count, sizeof(*pages), GFP_KERNEL); 1082 pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
1083 if (!pages)
1084 return ERR_PTR(-ENOMEM);
1040 1085
1041 err = get_user_pages_fast(addr, count, 1, pages); 1086 err = get_user_pages_fast(addr, count, 1, pages);
1042 if (err < count) { 1087 if (err < count) {
@@ -1146,14 +1191,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1146 return status; 1191 return status;
1147} 1192}
1148 1193
1149static int nvme_user_admin_cmd(struct nvme_ns *ns, 1194static int nvme_user_admin_cmd(struct nvme_dev *dev,
1150 struct nvme_admin_cmd __user *ucmd) 1195 struct nvme_admin_cmd __user *ucmd)
1151{ 1196{
1152 struct nvme_dev *dev = ns->dev;
1153 struct nvme_admin_cmd cmd; 1197 struct nvme_admin_cmd cmd;
1154 struct nvme_command c; 1198 struct nvme_command c;
1155 int status, length; 1199 int status, length;
1156 struct nvme_iod *iod; 1200 struct nvme_iod *uninitialized_var(iod);
1157 1201
1158 if (!capable(CAP_SYS_ADMIN)) 1202 if (!capable(CAP_SYS_ADMIN))
1159 return -EACCES; 1203 return -EACCES;
@@ -1204,7 +1248,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
1204 case NVME_IOCTL_ID: 1248 case NVME_IOCTL_ID:
1205 return ns->ns_id; 1249 return ns->ns_id;
1206 case NVME_IOCTL_ADMIN_CMD: 1250 case NVME_IOCTL_ADMIN_CMD:
1207 return nvme_user_admin_cmd(ns, (void __user *)arg); 1251 return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
1208 case NVME_IOCTL_SUBMIT_IO: 1252 case NVME_IOCTL_SUBMIT_IO:
1209 return nvme_submit_io(ns, (void __user *)arg); 1253 return nvme_submit_io(ns, (void __user *)arg);
1210 default: 1254 default:
@@ -1218,26 +1262,6 @@ static const struct block_device_operations nvme_fops = {
1218 .compat_ioctl = nvme_ioctl, 1262 .compat_ioctl = nvme_ioctl,
1219}; 1263};
1220 1264
1221static void nvme_timeout_ios(struct nvme_queue *nvmeq)
1222{
1223 int depth = nvmeq->q_depth - 1;
1224 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
1225 unsigned long now = jiffies;
1226 int cmdid;
1227
1228 for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
1229 void *ctx;
1230 nvme_completion_fn fn;
1231 static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
1232
1233 if (!time_after(now, info[cmdid].timeout))
1234 continue;
1235 dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
1236 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
1237 fn(nvmeq->dev, ctx, &cqe);
1238 }
1239}
1240
1241static void nvme_resubmit_bios(struct nvme_queue *nvmeq) 1265static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
1242{ 1266{
1243 while (bio_list_peek(&nvmeq->sq_cong)) { 1267 while (bio_list_peek(&nvmeq->sq_cong)) {
@@ -1269,7 +1293,7 @@ static int nvme_kthread(void *data)
1269 spin_lock_irq(&nvmeq->q_lock); 1293 spin_lock_irq(&nvmeq->q_lock);
1270 if (nvme_process_cq(nvmeq)) 1294 if (nvme_process_cq(nvmeq))
1271 printk("process_cq did something\n"); 1295 printk("process_cq did something\n");
1272 nvme_timeout_ios(nvmeq); 1296 nvme_cancel_ios(nvmeq, true);
1273 nvme_resubmit_bios(nvmeq); 1297 nvme_resubmit_bios(nvmeq);
1274 spin_unlock_irq(&nvmeq->q_lock); 1298 spin_unlock_irq(&nvmeq->q_lock);
1275 } 1299 }
@@ -1339,6 +1363,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
1339 ns->disk = disk; 1363 ns->disk = disk;
1340 lbaf = id->flbas & 0xf; 1364 lbaf = id->flbas & 0xf;
1341 ns->lba_shift = id->lbaf[lbaf].ds; 1365 ns->lba_shift = id->lbaf[lbaf].ds;
1366 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1367 if (dev->max_hw_sectors)
1368 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
1342 1369
1343 disk->major = nvme_major; 1370 disk->major = nvme_major;
1344 disk->minors = NVME_MINORS; 1371 disk->minors = NVME_MINORS;
@@ -1383,7 +1410,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
1383 1410
1384static int __devinit nvme_setup_io_queues(struct nvme_dev *dev) 1411static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
1385{ 1412{
1386 int result, cpu, i, nr_io_queues, db_bar_size; 1413 int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
1387 1414
1388 nr_io_queues = num_online_cpus(); 1415 nr_io_queues = num_online_cpus();
1389 result = set_queue_count(dev, nr_io_queues); 1416 result = set_queue_count(dev, nr_io_queues);
@@ -1429,9 +1456,10 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
1429 cpu = cpumask_next(cpu, cpu_online_mask); 1456 cpu = cpumask_next(cpu, cpu_online_mask);
1430 } 1457 }
1431 1458
1459 q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
1460 NVME_Q_DEPTH);
1432 for (i = 0; i < nr_io_queues; i++) { 1461 for (i = 0; i < nr_io_queues; i++) {
1433 dev->queues[i + 1] = nvme_create_queue(dev, i + 1, 1462 dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
1434 NVME_Q_DEPTH, i);
1435 if (IS_ERR(dev->queues[i + 1])) 1463 if (IS_ERR(dev->queues[i + 1]))
1436 return PTR_ERR(dev->queues[i + 1]); 1464 return PTR_ERR(dev->queues[i + 1]);
1437 dev->queue_count++; 1465 dev->queue_count++;
@@ -1480,6 +1508,10 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev)
1480 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); 1508 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
1481 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); 1509 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
1482 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); 1510 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
1511 if (ctrl->mdts) {
1512 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
1513 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
1514 }
1483 1515
1484 id_ns = mem; 1516 id_ns = mem;
1485 for (i = 1; i <= nn; i++) { 1517 for (i = 1; i <= nn; i++) {
@@ -1523,8 +1555,6 @@ static int nvme_dev_remove(struct nvme_dev *dev)
1523 list_del(&dev->node); 1555 list_del(&dev->node);
1524 spin_unlock(&dev_list_lock); 1556 spin_unlock(&dev_list_lock);
1525 1557
1526 /* TODO: wait all I/O finished or cancel them */
1527
1528 list_for_each_entry_safe(ns, next, &dev->namespaces, list) { 1558 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
1529 list_del(&ns->list); 1559 list_del(&ns->list);
1530 del_gendisk(ns->disk); 1560 del_gendisk(ns->disk);
@@ -1560,15 +1590,33 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
1560 dma_pool_destroy(dev->prp_small_pool); 1590 dma_pool_destroy(dev->prp_small_pool);
1561} 1591}
1562 1592
1563/* XXX: Use an ida or something to let remove / add work correctly */ 1593static DEFINE_IDA(nvme_instance_ida);
1564static void nvme_set_instance(struct nvme_dev *dev) 1594
1595static int nvme_set_instance(struct nvme_dev *dev)
1565{ 1596{
1566 static int instance; 1597 int instance, error;
1567 dev->instance = instance++; 1598
1599 do {
1600 if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
1601 return -ENODEV;
1602
1603 spin_lock(&dev_list_lock);
1604 error = ida_get_new(&nvme_instance_ida, &instance);
1605 spin_unlock(&dev_list_lock);
1606 } while (error == -EAGAIN);
1607
1608 if (error)
1609 return -ENODEV;
1610
1611 dev->instance = instance;
1612 return 0;
1568} 1613}
1569 1614
1570static void nvme_release_instance(struct nvme_dev *dev) 1615static void nvme_release_instance(struct nvme_dev *dev)
1571{ 1616{
1617 spin_lock(&dev_list_lock);
1618 ida_remove(&nvme_instance_ida, dev->instance);
1619 spin_unlock(&dev_list_lock);
1572} 1620}
1573 1621
1574static int __devinit nvme_probe(struct pci_dev *pdev, 1622static int __devinit nvme_probe(struct pci_dev *pdev,
@@ -1601,7 +1649,10 @@ static int __devinit nvme_probe(struct pci_dev *pdev,
1601 pci_set_drvdata(pdev, dev); 1649 pci_set_drvdata(pdev, dev);
1602 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 1650 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1603 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1651 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1604 nvme_set_instance(dev); 1652 result = nvme_set_instance(dev);
1653 if (result)
1654 goto disable;
1655
1605 dev->entry[0].vector = pdev->irq; 1656 dev->entry[0].vector = pdev->irq;
1606 1657
1607 result = nvme_setup_prp_pools(dev); 1658 result = nvme_setup_prp_pools(dev);
@@ -1704,15 +1755,17 @@ static struct pci_driver nvme_driver = {
1704 1755
1705static int __init nvme_init(void) 1756static int __init nvme_init(void)
1706{ 1757{
1707 int result = -EBUSY; 1758 int result;
1708 1759
1709 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); 1760 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
1710 if (IS_ERR(nvme_thread)) 1761 if (IS_ERR(nvme_thread))
1711 return PTR_ERR(nvme_thread); 1762 return PTR_ERR(nvme_thread);
1712 1763
1713 nvme_major = register_blkdev(nvme_major, "nvme"); 1764 result = register_blkdev(nvme_major, "nvme");
1714 if (nvme_major <= 0) 1765 if (result < 0)
1715 goto kill_kthread; 1766 goto kill_kthread;
1767 else if (result > 0)
1768 nvme_major = result;
1716 1769
1717 result = pci_register_driver(&nvme_driver); 1770 result = pci_register_driver(&nvme_driver);
1718 if (result) 1771 if (result)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9917943a3572..54a55f03115d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -246,13 +246,12 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
246{ 246{
247 struct rbd_device *rbd_dev = bdev->bd_disk->private_data; 247 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
248 248
249 rbd_get_dev(rbd_dev);
250
251 set_device_ro(bdev, rbd_dev->read_only);
252
253 if ((mode & FMODE_WRITE) && rbd_dev->read_only) 249 if ((mode & FMODE_WRITE) && rbd_dev->read_only)
254 return -EROFS; 250 return -EROFS;
255 251
252 rbd_get_dev(rbd_dev);
253 set_device_ro(bdev, rbd_dev->read_only);
254
256 return 0; 255 return 0;
257} 256}
258 257
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 47180a08edad..b6653a6fc5d5 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -391,7 +391,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
391 for (j = 0; j < nr_channels; j++) { 391 for (j = 0; j < nr_channels; j++) {
392 struct dimm_info *dimm = csrow->channels[j]->dimm; 392 struct dimm_info *dimm = csrow->channels[j]->dimm;
393 393
394 dimm->nr_pages = nr_pages / nr_channels; 394 dimm->nr_pages = nr_pages;
395 dimm->grain = nr_pages << PAGE_SHIFT; 395 dimm->grain = nr_pages << PAGE_SHIFT;
396 dimm->mtype = MEM_DDR2; 396 dimm->mtype = MEM_DDR2;
397 dimm->dtype = DEV_UNKNOWN; 397 dimm->dtype = DEV_UNKNOWN;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 39c63757c2a1..6a49dd00b81b 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1012,6 +1012,10 @@ static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
1012 /* add the number of COLUMN bits */ 1012 /* add the number of COLUMN bits */
1013 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); 1013 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
1014 1014
1015 /* Dual-rank memories have twice the size */
1016 if (dinfo->dual_rank)
1017 addrBits++;
1018
1015 addrBits += 6; /* add 64 bits per DIMM */ 1019 addrBits += 6; /* add 64 bits per DIMM */
1016 addrBits -= 20; /* divide by 2^^20 */ 1020 addrBits -= 20; /* divide by 2^^20 */
1017 addrBits -= 3; /* 8 bits per bytes */ 1021 addrBits -= 3; /* 8 bits per bytes */
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f3b1f9fafa4b..5715b7c2c517 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -513,7 +513,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
513{ 513{
514 struct sbridge_pvt *pvt = mci->pvt_info; 514 struct sbridge_pvt *pvt = mci->pvt_info;
515 struct dimm_info *dimm; 515 struct dimm_info *dimm;
516 int i, j, banks, ranks, rows, cols, size, npages; 516 unsigned i, j, banks, ranks, rows, cols, npages;
517 u64 size;
517 u32 reg; 518 u32 reg;
518 enum edac_type mode; 519 enum edac_type mode;
519 enum mem_type mtype; 520 enum mem_type mtype;
@@ -585,10 +586,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
585 cols = numcol(mtr); 586 cols = numcol(mtr);
586 587
587 /* DDR3 has 8 I/O banks */ 588 /* DDR3 has 8 I/O banks */
588 size = (rows * cols * banks * ranks) >> (20 - 3); 589 size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
589 npages = MiB_TO_PAGES(size); 590 npages = MiB_TO_PAGES(size);
590 591
591 edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", 592 edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
592 pvt->sbridge_dev->mc, i, j, 593 pvt->sbridge_dev->mc, i, j,
593 size, npages, 594 size, npages,
594 banks, ranks, rows, cols); 595 banks, ranks, rows, cols);
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 8a420f13905e..ed94b4ea72e9 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -308,6 +308,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
308{ 308{
309 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); 309 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
310 310
311 __set_gpio_level_p012(group, pin, value);
311 __set_gpio_dir_p012(group, pin, 0); 312 __set_gpio_dir_p012(group, pin, 0);
312 313
313 return 0; 314 return 0;
@@ -318,6 +319,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
318{ 319{
319 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); 320 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
320 321
322 __set_gpio_level_p3(group, pin, value);
321 __set_gpio_dir_p3(group, pin, 0); 323 __set_gpio_dir_p3(group, pin, 0);
322 324
323 return 0; 325 return 0;
@@ -326,6 +328,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
326static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin, 328static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
327 int value) 329 int value)
328{ 330{
331 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
332
333 __set_gpo_level_p3(group, pin, value);
329 return 0; 334 return 0;
330} 335}
331 336
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 97a81260485a..8a55beeb8bdc 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -17,6 +17,34 @@ config DRM_NOUVEAU
17 help 17 help
18 Choose this option for open-source nVidia support. 18 Choose this option for open-source nVidia support.
19 19
20config NOUVEAU_DEBUG
21 int "Maximum debug level"
22 depends on DRM_NOUVEAU
23 range 0 7
24 default 5
25 help
26 Selects the maximum debug level to compile support for.
27
28 0 - fatal
29 1 - error
30 2 - warning
31 3 - info
32 4 - debug
33 5 - trace (recommended)
34 6 - paranoia
35 7 - spam
36
37 The paranoia and spam levels will add a lot of extra checks which
38 may potentially slow down driver operation.
39
40config NOUVEAU_DEBUG_DEFAULT
41 int "Default debug level"
42 depends on DRM_NOUVEAU
43 range 0 7
44 default 3
45 help
46 Selects the default debug level
47
20config DRM_NOUVEAU_BACKLIGHT 48config DRM_NOUVEAU_BACKLIGHT
21 bool "Support for backlight control" 49 bool "Support for backlight control"
22 depends on DRM_NOUVEAU 50 depends on DRM_NOUVEAU
@@ -25,14 +53,6 @@ config DRM_NOUVEAU_BACKLIGHT
25 Say Y here if you want to control the backlight of your display 53 Say Y here if you want to control the backlight of your display
26 (e.g. a laptop panel). 54 (e.g. a laptop panel).
27 55
28config DRM_NOUVEAU_DEBUG
29 bool "Build in Nouveau's debugfs support"
30 depends on DRM_NOUVEAU && DEBUG_FS
31 default y
32 help
33 Say Y here if you want Nouveau to output debugging information
34 via debugfs.
35
36menu "I2C encoder or helper chips" 56menu "I2C encoder or helper chips"
37 depends on DRM && DRM_KMS_HELPER && I2C 57 depends on DRM && DRM_KMS_HELPER && I2C
38 58
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1cece6a78f39..a990df4d6c04 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -3,49 +3,190 @@
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ 6ccflags-y += -I$(src)/core/include
7 nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \ 7ccflags-y += -I$(src)/core
8 nouveau_sgdma.o nouveau_dma.o nouveau_util.o \ 8ccflags-y += -I$(src)
9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ 9
10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ 10nouveau-y := core/core/client.o
11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ 11nouveau-y += core/core/engctx.o
12 nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \ 12nouveau-y += core/core/engine.o
13 nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \ 13nouveau-y += core/core/enum.o
14 nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \ 14nouveau-y += core/core/gpuobj.o
15 nouveau_abi16.o \ 15nouveau-y += core/core/handle.o
16 nv04_timer.o \ 16nouveau-y += core/core/mm.o
17 nv04_mc.o nv40_mc.o nv50_mc.o \ 17nouveau-y += core/core/namedb.o
18 nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \ 18nouveau-y += core/core/object.o
19 nv50_fb.o nvc0_fb.o \ 19nouveau-y += core/core/option.o
20 nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \ 20nouveau-y += core/core/parent.o
21 nv84_fifo.o nvc0_fifo.o nve0_fifo.o \ 21nouveau-y += core/core/printk.o
22 nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \ 22nouveau-y += core/core/ramht.o
23 nv04_software.o nv50_software.o nvc0_software.o \ 23nouveau-y += core/core/subdev.o
24 nv04_graph.o nv10_graph.o nv20_graph.o \ 24
25 nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \ 25nouveau-y += core/subdev/bar/base.o
26 nv40_grctx.o nv50_grctx.o nvc0_grctx.o nve0_grctx.o \ 26nouveau-y += core/subdev/bar/nv50.o
27 nv84_crypt.o nv98_crypt.o \ 27nouveau-y += core/subdev/bar/nvc0.o
28 nva3_copy.o nvc0_copy.o \ 28nouveau-y += core/subdev/bios/base.o
29 nv31_mpeg.o nv50_mpeg.o \ 29nouveau-y += core/subdev/bios/bit.o
30 nv84_bsp.o \ 30nouveau-y += core/subdev/bios/conn.o
31 nv84_vp.o \ 31nouveau-y += core/subdev/bios/dcb.o
32 nv98_ppp.o \ 32nouveau-y += core/subdev/bios/dp.o
33 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ 33nouveau-y += core/subdev/bios/extdev.o
34 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ 34nouveau-y += core/subdev/bios/gpio.o
35 nv04_crtc.o nv04_display.o nv04_cursor.o \ 35nouveau-y += core/subdev/bios/i2c.o
36 nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \ 36nouveau-y += core/subdev/bios/init.o
37 nv50_cursor.o nv50_display.o \ 37nouveau-y += core/subdev/bios/mxm.o
38 nvd0_display.o \ 38nouveau-y += core/subdev/bios/perf.o
39 nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \ 39nouveau-y += core/subdev/bios/pll.o
40 nv10_gpio.o nv50_gpio.o \ 40nouveau-y += core/subdev/bios/therm.o
41 nv50_calc.o \ 41nouveau-y += core/subdev/clock/nv04.o
42 nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \ 42nouveau-y += core/subdev/clock/nv40.o
43 nv50_vram.o nvc0_vram.o \ 43nouveau-y += core/subdev/clock/nv50.o
44 nv50_vm.o nvc0_vm.o nouveau_prime.o 44nouveau-y += core/subdev/clock/nva3.o
45 45nouveau-y += core/subdev/clock/nvc0.o
46nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 46nouveau-y += core/subdev/clock/pllnv04.o
47nouveau-y += core/subdev/clock/pllnva3.o
48nouveau-y += core/subdev/device/base.o
49nouveau-y += core/subdev/device/nv04.o
50nouveau-y += core/subdev/device/nv10.o
51nouveau-y += core/subdev/device/nv20.o
52nouveau-y += core/subdev/device/nv30.o
53nouveau-y += core/subdev/device/nv40.o
54nouveau-y += core/subdev/device/nv50.o
55nouveau-y += core/subdev/device/nvc0.o
56nouveau-y += core/subdev/device/nve0.o
57nouveau-y += core/subdev/devinit/base.o
58nouveau-y += core/subdev/devinit/nv04.o
59nouveau-y += core/subdev/devinit/nv05.o
60nouveau-y += core/subdev/devinit/nv10.o
61nouveau-y += core/subdev/devinit/nv1a.o
62nouveau-y += core/subdev/devinit/nv20.o
63nouveau-y += core/subdev/devinit/nv50.o
64nouveau-y += core/subdev/fb/base.o
65nouveau-y += core/subdev/fb/nv04.o
66nouveau-y += core/subdev/fb/nv10.o
67nouveau-y += core/subdev/fb/nv20.o
68nouveau-y += core/subdev/fb/nv30.o
69nouveau-y += core/subdev/fb/nv40.o
70nouveau-y += core/subdev/fb/nv50.o
71nouveau-y += core/subdev/fb/nvc0.o
72nouveau-y += core/subdev/gpio/base.o
73nouveau-y += core/subdev/gpio/nv10.o
74nouveau-y += core/subdev/gpio/nv50.o
75nouveau-y += core/subdev/gpio/nvd0.o
76nouveau-y += core/subdev/i2c/base.o
77nouveau-y += core/subdev/i2c/aux.o
78nouveau-y += core/subdev/i2c/bit.o
79nouveau-y += core/subdev/ibus/nvc0.o
80nouveau-y += core/subdev/ibus/nve0.o
81nouveau-y += core/subdev/instmem/base.o
82nouveau-y += core/subdev/instmem/nv04.o
83nouveau-y += core/subdev/instmem/nv40.o
84nouveau-y += core/subdev/instmem/nv50.o
85nouveau-y += core/subdev/ltcg/nvc0.o
86nouveau-y += core/subdev/mc/base.o
87nouveau-y += core/subdev/mc/nv04.o
88nouveau-y += core/subdev/mc/nv44.o
89nouveau-y += core/subdev/mc/nv50.o
90nouveau-y += core/subdev/mc/nv98.o
91nouveau-y += core/subdev/mc/nvc0.o
92nouveau-y += core/subdev/mxm/base.o
93nouveau-y += core/subdev/mxm/mxms.o
94nouveau-y += core/subdev/mxm/nv50.o
95nouveau-y += core/subdev/therm/base.o
96nouveau-y += core/subdev/therm/fan.o
97nouveau-y += core/subdev/therm/ic.o
98nouveau-y += core/subdev/therm/nv40.o
99nouveau-y += core/subdev/therm/nv50.o
100nouveau-y += core/subdev/therm/temp.o
101nouveau-y += core/subdev/timer/base.o
102nouveau-y += core/subdev/timer/nv04.o
103nouveau-y += core/subdev/vm/base.o
104nouveau-y += core/subdev/vm/nv04.o
105nouveau-y += core/subdev/vm/nv41.o
106nouveau-y += core/subdev/vm/nv44.o
107nouveau-y += core/subdev/vm/nv50.o
108nouveau-y += core/subdev/vm/nvc0.o
109
110nouveau-y += core/engine/dmaobj/base.o
111nouveau-y += core/engine/dmaobj/nv04.o
112nouveau-y += core/engine/dmaobj/nv50.o
113nouveau-y += core/engine/dmaobj/nvc0.o
114nouveau-y += core/engine/bsp/nv84.o
115nouveau-y += core/engine/copy/nva3.o
116nouveau-y += core/engine/copy/nvc0.o
117nouveau-y += core/engine/copy/nve0.o
118nouveau-y += core/engine/crypt/nv84.o
119nouveau-y += core/engine/crypt/nv98.o
120nouveau-y += core/engine/disp/nv04.o
121nouveau-y += core/engine/disp/nv50.o
122nouveau-y += core/engine/disp/nvd0.o
123nouveau-y += core/engine/disp/vga.o
124nouveau-y += core/engine/fifo/base.o
125nouveau-y += core/engine/fifo/nv04.o
126nouveau-y += core/engine/fifo/nv10.o
127nouveau-y += core/engine/fifo/nv17.o
128nouveau-y += core/engine/fifo/nv40.o
129nouveau-y += core/engine/fifo/nv50.o
130nouveau-y += core/engine/fifo/nv84.o
131nouveau-y += core/engine/fifo/nvc0.o
132nouveau-y += core/engine/fifo/nve0.o
133nouveau-y += core/engine/graph/ctxnv40.o
134nouveau-y += core/engine/graph/ctxnv50.o
135nouveau-y += core/engine/graph/ctxnvc0.o
136nouveau-y += core/engine/graph/ctxnve0.o
137nouveau-y += core/engine/graph/nv04.o
138nouveau-y += core/engine/graph/nv10.o
139nouveau-y += core/engine/graph/nv20.o
140nouveau-y += core/engine/graph/nv25.o
141nouveau-y += core/engine/graph/nv2a.o
142nouveau-y += core/engine/graph/nv30.o
143nouveau-y += core/engine/graph/nv34.o
144nouveau-y += core/engine/graph/nv35.o
145nouveau-y += core/engine/graph/nv40.o
146nouveau-y += core/engine/graph/nv50.o
147nouveau-y += core/engine/graph/nvc0.o
148nouveau-y += core/engine/graph/nve0.o
149nouveau-y += core/engine/mpeg/nv31.o
150nouveau-y += core/engine/mpeg/nv40.o
151nouveau-y += core/engine/mpeg/nv50.o
152nouveau-y += core/engine/mpeg/nv84.o
153nouveau-y += core/engine/ppp/nv98.o
154nouveau-y += core/engine/software/nv04.o
155nouveau-y += core/engine/software/nv10.o
156nouveau-y += core/engine/software/nv50.o
157nouveau-y += core/engine/software/nvc0.o
158nouveau-y += core/engine/vp/nv84.o
159
160# drm/core
161nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
162nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o
163nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
164nouveau-y += nouveau_prime.o nouveau_abi16.o
165nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
166
167# drm/kms
168nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
169nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
170nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
171
172# drm/kms/nv04:nv50
173nouveau-y += nouveau_hw.o nouveau_calc.o
174nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
175nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
176
177# drm/kms/nv50-
178nouveau-y += nv50_display.o nvd0_display.o
179nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
180nouveau-y += nv50_evo.o
181
182# drm/pm
183nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
184nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
185nouveau-y += nouveau_mem.o
186
187# other random bits
47nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o 188nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
48nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
49nouveau-$(CONFIG_ACPI) += nouveau_acpi.o 189nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
190nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
50 191
51obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o 192obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
new file mode 100644
index 000000000000..c617f0480071
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -0,0 +1,103 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/client.h>
27#include <core/handle.h>
28#include <core/option.h>
29
30#include <subdev/device.h>
31
32static void
33nouveau_client_dtor(struct nouveau_object *object)
34{
35 struct nouveau_client *client = (void *)object;
36 nouveau_object_ref(NULL, &client->device);
37 nouveau_handle_destroy(client->root);
38 nouveau_namedb_destroy(&client->base);
39}
40
41static struct nouveau_oclass
42nouveau_client_oclass = {
43 .ofuncs = &(struct nouveau_ofuncs) {
44 .dtor = nouveau_client_dtor,
45 },
46};
47
48int
49nouveau_client_create_(const char *name, u64 devname, const char *cfg,
50 const char *dbg, int length, void **pobject)
51{
52 struct nouveau_object *device;
53 struct nouveau_client *client;
54 int ret;
55
56 device = (void *)nouveau_device_find(devname);
57 if (!device)
58 return -ENODEV;
59
60 ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
61 NV_CLIENT_CLASS, nouveau_device_sclass,
62 0, length, pobject);
63 client = *pobject;
64 if (ret)
65 return ret;
66
67 ret = nouveau_handle_create(nv_object(client), ~0, ~0,
68 nv_object(client), &client->root);
69 if (ret) {
70 nouveau_namedb_destroy(&client->base);
71 return ret;
72 }
73
74 /* prevent init/fini being called, os in in charge of this */
75 atomic_set(&nv_object(client)->usecount, 2);
76
77 nouveau_object_ref(device, &client->device);
78 snprintf(client->name, sizeof(client->name), "%s", name);
79 client->debug = nouveau_dbgopt(dbg, "CLIENT");
80 return 0;
81}
82
83int
84nouveau_client_init(struct nouveau_client *client)
85{
86 int ret;
87 nv_debug(client, "init running\n");
88 ret = nouveau_handle_init(client->root);
89 nv_debug(client, "init completed with %d\n", ret);
90 return ret;
91}
92
93int
94nouveau_client_fini(struct nouveau_client *client, bool suspend)
95{
96 const char *name[2] = { "fini", "suspend" };
97 int ret;
98
99 nv_debug(client, "%s running\n", name[suspend]);
100 ret = nouveau_handle_fini(client->root, suspend);
101 nv_debug(client, "%s completed with %d\n", name[suspend], ret);
102 return ret;
103}
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
new file mode 100644
index 000000000000..e41b10d5eb59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -0,0 +1,236 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/namedb.h>
27#include <core/handle.h>
28#include <core/client.h>
29#include <core/engctx.h>
30
31#include <subdev/vm.h>
32
33static inline int
34nouveau_engctx_exists(struct nouveau_object *parent,
35 struct nouveau_engine *engine, void **pobject)
36{
37 struct nouveau_engctx *engctx;
38 struct nouveau_object *parctx;
39
40 list_for_each_entry(engctx, &engine->contexts, head) {
41 parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
42 if (parctx == parent) {
43 atomic_inc(&nv_object(engctx)->refcount);
44 *pobject = engctx;
45 return 1;
46 }
47 }
48
49 return 0;
50}
51
52int
53nouveau_engctx_create_(struct nouveau_object *parent,
54 struct nouveau_object *engobj,
55 struct nouveau_oclass *oclass,
56 struct nouveau_object *pargpu,
57 u32 size, u32 align, u32 flags,
58 int length, void **pobject)
59{
60 struct nouveau_client *client = nouveau_client(parent);
61 struct nouveau_engine *engine = nv_engine(engobj);
62 struct nouveau_object *engctx;
63 unsigned long save;
64 int ret;
65
66 /* check if this engine already has a context for the parent object,
67 * and reference it instead of creating a new one
68 */
69 spin_lock_irqsave(&engine->lock, save);
70 ret = nouveau_engctx_exists(parent, engine, pobject);
71 spin_unlock_irqrestore(&engine->lock, save);
72 if (ret)
73 return ret;
74
75 /* create the new context, supports creating both raw objects and
76 * objects backed by instance memory
77 */
78 if (size) {
79 ret = nouveau_gpuobj_create_(parent, engobj, oclass,
80 NV_ENGCTX_CLASS,
81 pargpu, size, align, flags,
82 length, pobject);
83 } else {
84 ret = nouveau_object_create_(parent, engobj, oclass,
85 NV_ENGCTX_CLASS, length, pobject);
86 }
87
88 engctx = *pobject;
89 if (ret)
90 return ret;
91
92 /* must take the lock again and re-check a context doesn't already
93 * exist (in case of a race) - the lock had to be dropped before as
94 * it's not possible to allocate the object with it held.
95 */
96 spin_lock_irqsave(&engine->lock, save);
97 ret = nouveau_engctx_exists(parent, engine, pobject);
98 if (ret) {
99 spin_unlock_irqrestore(&engine->lock, save);
100 nouveau_object_ref(NULL, &engctx);
101 return ret;
102 }
103
104 if (client->vm)
105 atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
106 list_add(&nv_engctx(engctx)->head, &engine->contexts);
107 nv_engctx(engctx)->addr = ~0ULL;
108 spin_unlock_irqrestore(&engine->lock, save);
109 return 0;
110}
111
112void
113nouveau_engctx_destroy(struct nouveau_engctx *engctx)
114{
115 struct nouveau_object *engobj = nv_object(engctx)->engine;
116 struct nouveau_engine *engine = nv_engine(engobj);
117 struct nouveau_client *client = nouveau_client(engctx);
118 unsigned long save;
119
120 nouveau_gpuobj_unmap(&engctx->vma);
121 spin_lock_irqsave(&engine->lock, save);
122 list_del(&engctx->head);
123 spin_unlock_irqrestore(&engine->lock, save);
124
125 if (client->vm)
126 atomic_dec(&client->vm->engref[nv_engidx(engobj)]);
127
128 if (engctx->base.size)
129 nouveau_gpuobj_destroy(&engctx->base);
130 else
131 nouveau_object_destroy(&engctx->base.base);
132}
133
134int
135nouveau_engctx_init(struct nouveau_engctx *engctx)
136{
137 struct nouveau_object *object = nv_object(engctx);
138 struct nouveau_subdev *subdev = nv_subdev(object->engine);
139 struct nouveau_object *parent;
140 struct nouveau_subdev *pardev;
141 int ret;
142
143 ret = nouveau_gpuobj_init(&engctx->base);
144 if (ret)
145 return ret;
146
147 parent = nv_pclass(object->parent, NV_PARENT_CLASS);
148 pardev = nv_subdev(parent->engine);
149 if (nv_parent(parent)->context_attach) {
150 mutex_lock(&pardev->mutex);
151 ret = nv_parent(parent)->context_attach(parent, object);
152 mutex_unlock(&pardev->mutex);
153 }
154
155 if (ret) {
156 nv_error(parent, "failed to attach %s context, %d\n",
157 subdev->name, ret);
158 return ret;
159 }
160
161 nv_debug(parent, "attached %s context\n", subdev->name);
162 return 0;
163}
164
165int
166nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
167{
168 struct nouveau_object *object = nv_object(engctx);
169 struct nouveau_subdev *subdev = nv_subdev(object->engine);
170 struct nouveau_object *parent;
171 struct nouveau_subdev *pardev;
172 int ret = 0;
173
174 parent = nv_pclass(object->parent, NV_PARENT_CLASS);
175 pardev = nv_subdev(parent->engine);
176 if (nv_parent(parent)->context_detach) {
177 mutex_lock(&pardev->mutex);
178 ret = nv_parent(parent)->context_detach(parent, suspend, object);
179 mutex_unlock(&pardev->mutex);
180 }
181
182 if (ret) {
183 nv_error(parent, "failed to detach %s context, %d\n",
184 subdev->name, ret);
185 return ret;
186 }
187
188 nv_debug(parent, "detached %s context\n", subdev->name);
189 return nouveau_gpuobj_fini(&engctx->base, suspend);
190}
191
192void
193_nouveau_engctx_dtor(struct nouveau_object *object)
194{
195 nouveau_engctx_destroy(nv_engctx(object));
196}
197
198int
199_nouveau_engctx_init(struct nouveau_object *object)
200{
201 return nouveau_engctx_init(nv_engctx(object));
202}
203
204
205int
206_nouveau_engctx_fini(struct nouveau_object *object, bool suspend)
207{
208 return nouveau_engctx_fini(nv_engctx(object), suspend);
209}
210
211struct nouveau_object *
212nouveau_engctx_get(struct nouveau_engine *engine, u64 addr)
213{
214 struct nouveau_engctx *engctx;
215 unsigned long flags;
216
217 spin_lock_irqsave(&engine->lock, flags);
218 list_for_each_entry(engctx, &engine->contexts, head) {
219 if (engctx->addr == addr) {
220 engctx->save = flags;
221 return nv_object(engctx);
222 }
223 }
224 spin_unlock_irqrestore(&engine->lock, flags);
225 return NULL;
226}
227
228void
229nouveau_engctx_put(struct nouveau_object *object)
230{
231 if (object) {
232 struct nouveau_engine *engine = nv_engine(object->engine);
233 struct nouveau_engctx *engctx = nv_engctx(object);
234 spin_unlock_irqrestore(&engine->lock, engctx->save);
235 }
236}
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
new file mode 100644
index 000000000000..09b3bd502fd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/device.h>
26#include <core/engine.h>
27#include <core/option.h>
28
29int
30nouveau_engine_create_(struct nouveau_object *parent,
31 struct nouveau_object *engobj,
32 struct nouveau_oclass *oclass, bool enable,
33 const char *iname, const char *fname,
34 int length, void **pobject)
35{
36 struct nouveau_device *device = nv_device(parent);
37 struct nouveau_engine *engine;
38 int ret;
39
40 ret = nouveau_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
41 iname, fname, length, pobject);
42 engine = *pobject;
43 if (ret)
44 return ret;
45
46 if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
47 if (!enable)
48 nv_warn(engine, "disabled, %s=1 to enable\n", iname);
49 return -ENODEV;
50 }
51
52 INIT_LIST_HEAD(&engine->contexts);
53 spin_lock_init(&engine->lock);
54 return 0;
55}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/core/core/enum.c
index e51b51503baa..7cc7133d82de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_util.c
+++ b/drivers/gpu/drm/nouveau/core/core/enum.c
@@ -25,27 +25,8 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/ratelimit.h> 28#include <core/os.h>
29 29#include <core/enum.h>
30#include "nouveau_util.h"
31
32static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
33
34void
35nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
36{
37 while (bf->name) {
38 if (value & bf->mask) {
39 printk(" %s", bf->name);
40 value &= ~bf->mask;
41 }
42
43 bf++;
44 }
45
46 if (value)
47 printk(" (unknown bits 0x%08x)", value);
48}
49 30
50const struct nouveau_enum * 31const struct nouveau_enum *
51nouveau_enum_find(const struct nouveau_enum *en, u32 value) 32nouveau_enum_find(const struct nouveau_enum *en, u32 value)
@@ -63,16 +44,24 @@ void
63nouveau_enum_print(const struct nouveau_enum *en, u32 value) 44nouveau_enum_print(const struct nouveau_enum *en, u32 value)
64{ 45{
65 en = nouveau_enum_find(en, value); 46 en = nouveau_enum_find(en, value);
66 if (en) { 47 if (en)
67 printk("%s", en->name); 48 printk("%s", en->name);
68 return; 49 else
69 } 50 printk("(unknown enum 0x%08x)", value);
70
71 printk("(unknown enum 0x%08x)", value);
72} 51}
73 52
74int 53void
75nouveau_ratelimit(void) 54nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
76{ 55{
77 return __ratelimit(&nouveau_ratelimit_state); 56 while (bf->name) {
57 if (value & bf->mask) {
58 printk(" %s", bf->name);
59 value &= ~bf->mask;
60 }
61
62 bf++;
63 }
64
65 if (value)
66 printk(" (unknown bits 0x%08x)", value);
78} 67}
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
new file mode 100644
index 000000000000..1f34549aff18
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -0,0 +1,318 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/gpuobj.h>
27
28#include <subdev/instmem.h>
29#include <subdev/bar.h>
30#include <subdev/vm.h>
31
32void
33nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
34{
35 int i;
36
37 if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
38 for (i = 0; i < gpuobj->size; i += 4)
39 nv_wo32(gpuobj, i, 0x00000000);
40 }
41
42 if (gpuobj->heap.block_size)
43 nouveau_mm_fini(&gpuobj->heap);
44
45 nouveau_object_destroy(&gpuobj->base);
46}
47
48int
49nouveau_gpuobj_create_(struct nouveau_object *parent,
50 struct nouveau_object *engine,
51 struct nouveau_oclass *oclass, u32 pclass,
52 struct nouveau_object *pargpu,
53 u32 size, u32 align, u32 flags,
54 int length, void **pobject)
55{
56 struct nouveau_instmem *imem = nouveau_instmem(parent);
57 struct nouveau_bar *bar = nouveau_bar(parent);
58 struct nouveau_gpuobj *gpuobj;
59 struct nouveau_mm *heap = NULL;
60 int ret, i;
61 u64 addr;
62
63 *pobject = NULL;
64
65 if (pargpu) {
66 while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
67 if (nv_gpuobj(pargpu)->heap.block_size)
68 break;
69 pargpu = pargpu->parent;
70 }
71
72 if (unlikely(pargpu == NULL)) {
73 nv_error(parent, "no gpuobj heap\n");
74 return -EINVAL;
75 }
76
77 addr = nv_gpuobj(pargpu)->addr;
78 heap = &nv_gpuobj(pargpu)->heap;
79 atomic_inc(&parent->refcount);
80 } else {
81 ret = imem->alloc(imem, parent, size, align, &parent);
82 pargpu = parent;
83 if (ret)
84 return ret;
85
86 addr = nv_memobj(pargpu)->addr;
87 size = nv_memobj(pargpu)->size;
88
89 if (bar && bar->alloc) {
90 struct nouveau_instobj *iobj = (void *)parent;
91 struct nouveau_mem **mem = (void *)(iobj + 1);
92 struct nouveau_mem *node = *mem;
93 if (!bar->alloc(bar, parent, node, &pargpu)) {
94 nouveau_object_ref(NULL, &parent);
95 parent = pargpu;
96 }
97 }
98 }
99
100 ret = nouveau_object_create_(parent, engine, oclass, pclass |
101 NV_GPUOBJ_CLASS, length, pobject);
102 nouveau_object_ref(NULL, &parent);
103 gpuobj = *pobject;
104 if (ret)
105 return ret;
106
107 gpuobj->parent = pargpu;
108 gpuobj->flags = flags;
109 gpuobj->addr = addr;
110 gpuobj->size = size;
111
112 if (heap) {
113 ret = nouveau_mm_head(heap, 1, size, size,
114 max(align, (u32)1), &gpuobj->node);
115 if (ret)
116 return ret;
117
118 gpuobj->addr += gpuobj->node->offset;
119 }
120
121 if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
122 ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
123 if (ret)
124 return ret;
125 }
126
127 if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
128 for (i = 0; i < gpuobj->size; i += 4)
129 nv_wo32(gpuobj, i, 0x00000000);
130 }
131
132 return ret;
133}
134
135struct nouveau_gpuobj_class {
136 struct nouveau_object *pargpu;
137 u64 size;
138 u32 align;
139 u32 flags;
140};
141
142static int
143_nouveau_gpuobj_ctor(struct nouveau_object *parent,
144 struct nouveau_object *engine,
145 struct nouveau_oclass *oclass, void *data, u32 size,
146 struct nouveau_object **pobject)
147{
148 struct nouveau_gpuobj_class *args = data;
149 struct nouveau_gpuobj *object;
150 int ret;
151
152 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
153 args->size, args->align, args->flags,
154 &object);
155 *pobject = nv_object(object);
156 if (ret)
157 return ret;
158
159 return 0;
160}
161
162void
163_nouveau_gpuobj_dtor(struct nouveau_object *object)
164{
165 nouveau_gpuobj_destroy(nv_gpuobj(object));
166}
167
168int
169_nouveau_gpuobj_init(struct nouveau_object *object)
170{
171 return nouveau_gpuobj_init(nv_gpuobj(object));
172}
173
174int
175_nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
176{
177 return nouveau_gpuobj_fini(nv_gpuobj(object), suspend);
178}
179
180u32
181_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
182{
183 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
184 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
185 if (gpuobj->node)
186 addr += gpuobj->node->offset;
187 return pfuncs->rd32(gpuobj->parent, addr);
188}
189
190void
191_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
192{
193 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
194 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
195 if (gpuobj->node)
196 addr += gpuobj->node->offset;
197 pfuncs->wr32(gpuobj->parent, addr, data);
198}
199
200static struct nouveau_oclass
201_nouveau_gpuobj_oclass = {
202 .handle = 0x00000000,
203 .ofuncs = &(struct nouveau_ofuncs) {
204 .ctor = _nouveau_gpuobj_ctor,
205 .dtor = _nouveau_gpuobj_dtor,
206 .init = _nouveau_gpuobj_init,
207 .fini = _nouveau_gpuobj_fini,
208 .rd32 = _nouveau_gpuobj_rd32,
209 .wr32 = _nouveau_gpuobj_wr32,
210 },
211};
212
213int
214nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
215 u32 size, u32 align, u32 flags,
216 struct nouveau_gpuobj **pgpuobj)
217{
218 struct nouveau_object *engine = parent;
219 struct nouveau_gpuobj_class args = {
220 .pargpu = pargpu,
221 .size = size,
222 .align = align,
223 .flags = flags,
224 };
225
226 if (!nv_iclass(engine, NV_SUBDEV_CLASS))
227 engine = engine->engine;
228 BUG_ON(engine == NULL);
229
230 return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass,
231 &args, sizeof(args),
232 (struct nouveau_object **)pgpuobj);
233}
234
235int
236nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access,
237 struct nouveau_vma *vma)
238{
239 struct nouveau_bar *bar = nouveau_bar(gpuobj);
240 int ret = -EINVAL;
241
242 if (bar && bar->umap) {
243 struct nouveau_instobj *iobj = (void *)
244 nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
245 struct nouveau_mem **mem = (void *)(iobj + 1);
246 ret = bar->umap(bar, *mem, access, vma);
247 }
248
249 return ret;
250}
251
252int
253nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
254 u32 access, struct nouveau_vma *vma)
255{
256 struct nouveau_instobj *iobj = (void *)
257 nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
258 struct nouveau_mem **mem = (void *)(iobj + 1);
259 int ret;
260
261 ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma);
262 if (ret)
263 return ret;
264
265 nouveau_vm_map(vma, *mem);
266 return 0;
267}
268
269void
270nouveau_gpuobj_unmap(struct nouveau_vma *vma)
271{
272 if (vma->node) {
273 nouveau_vm_unmap(vma);
274 nouveau_vm_put(vma);
275 }
276}
277
278/* the below is basically only here to support sharing the paged dma object
279 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
280 * anywhere else.
281 */
282
283static void
284nouveau_gpudup_dtor(struct nouveau_object *object)
285{
286 struct nouveau_gpuobj *gpuobj = (void *)object;
287 nouveau_object_ref(NULL, &gpuobj->parent);
288 nouveau_object_destroy(&gpuobj->base);
289}
290
291static struct nouveau_oclass
292nouveau_gpudup_oclass = {
293 .handle = NV_GPUOBJ_CLASS,
294 .ofuncs = &(struct nouveau_ofuncs) {
295 .dtor = nouveau_gpudup_dtor,
296 .init = nouveau_object_init,
297 .fini = nouveau_object_fini,
298 },
299};
300
301int
302nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base,
303 struct nouveau_gpuobj **pgpuobj)
304{
305 struct nouveau_gpuobj *gpuobj;
306 int ret;
307
308 ret = nouveau_object_create(parent, parent->engine,
309 &nouveau_gpudup_oclass, 0, &gpuobj);
310 *pgpuobj = gpuobj;
311 if (ret)
312 return ret;
313
314 nouveau_object_ref(nv_object(base), &gpuobj->parent);
315 gpuobj->addr = base->addr;
316 gpuobj->size = base->size;
317 return 0;
318}
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
new file mode 100644
index 000000000000..b8d2cbf8a7a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -0,0 +1,223 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/handle.h>
27#include <core/client.h>
28
29#define hprintk(h,l,f,a...) do { \
30 struct nouveau_client *c = nouveau_client((h)->object); \
31 struct nouveau_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
32 nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \
33} while(0)
34
35int
36nouveau_handle_init(struct nouveau_handle *handle)
37{
38 struct nouveau_handle *item;
39 int ret;
40
41 hprintk(handle, TRACE, "init running\n");
42 ret = nouveau_object_inc(handle->object);
43 if (ret)
44 return ret;
45
46 hprintk(handle, TRACE, "init children\n");
47 list_for_each_entry(item, &handle->tree, head) {
48 ret = nouveau_handle_init(item);
49 if (ret)
50 goto fail;
51 }
52
53 hprintk(handle, TRACE, "init completed\n");
54 return 0;
55fail:
56 hprintk(handle, ERROR, "init failed with %d\n", ret);
57 list_for_each_entry_continue_reverse(item, &handle->tree, head) {
58 nouveau_handle_fini(item, false);
59 }
60
61 nouveau_object_dec(handle->object, false);
62 return ret;
63}
64
65int
66nouveau_handle_fini(struct nouveau_handle *handle, bool suspend)
67{
68 static char *name[2] = { "fini", "suspend" };
69 struct nouveau_handle *item;
70 int ret;
71
72 hprintk(handle, TRACE, "%s children\n", name[suspend]);
73 list_for_each_entry(item, &handle->tree, head) {
74 ret = nouveau_handle_fini(item, suspend);
75 if (ret && suspend)
76 goto fail;
77 }
78
79 hprintk(handle, TRACE, "%s running\n", name[suspend]);
80 if (handle->object) {
81 ret = nouveau_object_dec(handle->object, suspend);
82 if (ret && suspend)
83 goto fail;
84 }
85
86 hprintk(handle, TRACE, "%s completed\n", name[suspend]);
87 return 0;
88fail:
89 hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
90 list_for_each_entry_continue_reverse(item, &handle->tree, head) {
91 int rret = nouveau_handle_init(item);
92 if (rret)
93 hprintk(handle, FATAL, "failed to restart, %d\n", rret);
94 }
95
96 return ret;
97}
98
99int
100nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
101 struct nouveau_object *object,
102 struct nouveau_handle **phandle)
103{
104 struct nouveau_object *namedb;
105 struct nouveau_handle *handle;
106 int ret;
107
108 namedb = parent;
109 while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
110 namedb = namedb->parent;
111
112 handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
113 if (!handle)
114 return -ENOMEM;
115
116 INIT_LIST_HEAD(&handle->head);
117 INIT_LIST_HEAD(&handle->tree);
118 handle->name = _handle;
119 handle->priv = ~0;
120
121 ret = nouveau_namedb_insert(nv_namedb(namedb), _handle, object, handle);
122 if (ret) {
123 kfree(handle);
124 return ret;
125 }
126
127 if (nv_parent(parent)->object_attach) {
128 ret = nv_parent(parent)->object_attach(parent, object, _handle);
129 if (ret < 0) {
130 nouveau_handle_destroy(handle);
131 return ret;
132 }
133
134 handle->priv = ret;
135 }
136
137 if (object != namedb) {
138 while (!nv_iclass(namedb, NV_CLIENT_CLASS))
139 namedb = namedb->parent;
140
141 handle->parent = nouveau_namedb_get(nv_namedb(namedb), _parent);
142 if (handle->parent) {
143 list_add(&handle->head, &handle->parent->tree);
144 nouveau_namedb_put(handle->parent);
145 }
146 }
147
148 hprintk(handle, TRACE, "created\n");
149 return 0;
150}
151
152void
153nouveau_handle_destroy(struct nouveau_handle *handle)
154{
155 struct nouveau_handle *item, *temp;
156
157 hprintk(handle, TRACE, "destroy running\n");
158 list_for_each_entry_safe(item, temp, &handle->tree, head) {
159 nouveau_handle_destroy(item);
160 }
161 list_del(&handle->head);
162
163 if (handle->priv != ~0) {
164 struct nouveau_object *parent = handle->parent->object;
165 nv_parent(parent)->object_detach(parent, handle->priv);
166 }
167
168 hprintk(handle, TRACE, "destroy completed\n");
169 nouveau_namedb_remove(handle);
170 kfree(handle);
171}
172
173struct nouveau_object *
174nouveau_handle_ref(struct nouveau_object *parent, u32 name)
175{
176 struct nouveau_object *object = NULL;
177 struct nouveau_handle *handle;
178
179 while (!nv_iclass(parent, NV_NAMEDB_CLASS))
180 parent = parent->parent;
181
182 handle = nouveau_namedb_get(nv_namedb(parent), name);
183 if (handle) {
184 nouveau_object_ref(handle->object, &object);
185 nouveau_namedb_put(handle);
186 }
187
188 return object;
189}
190
191struct nouveau_handle *
192nouveau_handle_get_class(struct nouveau_object *engctx, u16 oclass)
193{
194 struct nouveau_namedb *namedb;
195 if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
196 return nouveau_namedb_get_class(namedb, oclass);
197 return NULL;
198}
199
200struct nouveau_handle *
201nouveau_handle_get_vinst(struct nouveau_object *engctx, u64 vinst)
202{
203 struct nouveau_namedb *namedb;
204 if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
205 return nouveau_namedb_get_vinst(namedb, vinst);
206 return NULL;
207}
208
209struct nouveau_handle *
210nouveau_handle_get_cinst(struct nouveau_object *engctx, u32 cinst)
211{
212 struct nouveau_namedb *namedb;
213 if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
214 return nouveau_namedb_get_cinst(namedb, cinst);
215 return NULL;
216}
217
218void
219nouveau_handle_put(struct nouveau_handle *handle)
220{
221 if (handle)
222 nouveau_namedb_put(handle);
223}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index b29ffb3d1408..bfddf87926dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Red Hat Inc. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,20 +22,52 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include "core/os.h"
26#include "nouveau_drv.h" 26#include "core/mm.h"
27#include "nouveau_mm.h"
28 27
29static inline void 28#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
30region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a) 29 list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
30
31void
32nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
31{ 33{
32 list_del(&a->nl_entry); 34 struct nouveau_mm_node *this = *pthis;
33 list_del(&a->fl_entry); 35
34 kfree(a); 36 if (this) {
37 struct nouveau_mm_node *prev = node(this, prev);
38 struct nouveau_mm_node *next = node(this, next);
39
40 if (prev && prev->type == 0) {
41 prev->length += this->length;
42 list_del(&this->nl_entry);
43 kfree(this); this = prev;
44 }
45
46 if (next && next->type == 0) {
47 next->offset = this->offset;
48 next->length += this->length;
49 if (this->type == 0)
50 list_del(&this->fl_entry);
51 list_del(&this->nl_entry);
52 kfree(this); this = NULL;
53 }
54
55 if (this && this->type != 0) {
56 list_for_each_entry(prev, &mm->free, fl_entry) {
57 if (this->offset < prev->offset)
58 break;
59 }
60
61 list_add_tail(&this->fl_entry, &prev->fl_entry);
62 this->type = 0;
63 }
64 }
65
66 *pthis = NULL;
35} 67}
36 68
37static struct nouveau_mm_node * 69static struct nouveau_mm_node *
38region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size) 70region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
39{ 71{
40 struct nouveau_mm_node *b; 72 struct nouveau_mm_node *b;
41 73
@@ -57,38 +89,12 @@ region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
57 return b; 89 return b;
58} 90}
59 91
60#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
61 list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
62
63void
64nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
65{
66 struct nouveau_mm_node *prev = node(this, prev);
67 struct nouveau_mm_node *next = node(this, next);
68
69 list_add(&this->fl_entry, &mm->free);
70 this->type = 0;
71
72 if (prev && prev->type == 0) {
73 prev->length += this->length;
74 region_put(mm, this);
75 this = prev;
76 }
77
78 if (next && next->type == 0) {
79 next->offset = this->offset;
80 next->length += this->length;
81 region_put(mm, this);
82 }
83}
84
85int 92int
86nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc, 93nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
87 u32 align, struct nouveau_mm_node **pnode) 94 u32 align, struct nouveau_mm_node **pnode)
88{ 95{
89 struct nouveau_mm_node *prev, *this, *next; 96 struct nouveau_mm_node *prev, *this, *next;
90 u32 min = size_nc ? size_nc : size; 97 u32 mask = align - 1;
91 u32 align_mask = align - 1;
92 u32 splitoff; 98 u32 splitoff;
93 u32 s, e; 99 u32 s, e;
94 100
@@ -104,16 +110,86 @@ nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
104 if (next && next->type != type) 110 if (next && next->type != type)
105 e = rounddown(e, mm->block_size); 111 e = rounddown(e, mm->block_size);
106 112
107 s = (s + align_mask) & ~align_mask; 113 s = (s + mask) & ~mask;
108 e &= ~align_mask; 114 e &= ~mask;
109 if (s > e || e - s < min) 115 if (s > e || e - s < size_min)
110 continue; 116 continue;
111 117
112 splitoff = s - this->offset; 118 splitoff = s - this->offset;
113 if (splitoff && !region_split(mm, this, splitoff)) 119 if (splitoff && !region_head(mm, this, splitoff))
120 return -ENOMEM;
121
122 this = region_head(mm, this, min(size_max, e - s));
123 if (!this)
124 return -ENOMEM;
125
126 this->type = type;
127 list_del(&this->fl_entry);
128 *pnode = this;
129 return 0;
130 }
131
132 return -ENOSPC;
133}
134
135static struct nouveau_mm_node *
136region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
137{
138 struct nouveau_mm_node *b;
139
140 if (a->length == size)
141 return a;
142
143 b = kmalloc(sizeof(*b), GFP_KERNEL);
144 if (unlikely(b == NULL))
145 return NULL;
146
147 a->length -= size;
148 b->offset = a->offset + a->length;
149 b->length = size;
150 b->type = a->type;
151
152 list_add(&b->nl_entry, &a->nl_entry);
153 if (b->type == 0)
154 list_add(&b->fl_entry, &a->fl_entry);
155 return b;
156}
157
158int
159nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
160 u32 align, struct nouveau_mm_node **pnode)
161{
162 struct nouveau_mm_node *prev, *this, *next;
163 u32 mask = align - 1;
164
165 list_for_each_entry_reverse(this, &mm->free, fl_entry) {
166 u32 e = this->offset + this->length;
167 u32 s = this->offset;
168 u32 c = 0, a;
169
170 prev = node(this, prev);
171 if (prev && prev->type != type)
172 s = roundup(s, mm->block_size);
173
174 next = node(this, next);
175 if (next && next->type != type) {
176 e = rounddown(e, mm->block_size);
177 c = next->offset - e;
178 }
179
180 s = (s + mask) & ~mask;
181 a = e - s;
182 if (s > e || a < size_min)
183 continue;
184
185 a = min(a, size_max);
186 s = (e - a) & ~mask;
187 c += (e - s) - a;
188
189 if (c && !region_tail(mm, this, c))
114 return -ENOMEM; 190 return -ENOMEM;
115 191
116 this = region_split(mm, this, min(size, e - s)); 192 this = region_tail(mm, this, a);
117 if (!this) 193 if (!this)
118 return -ENOMEM; 194 return -ENOMEM;
119 195
@@ -148,6 +224,7 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
148 list_add_tail(&node->nl_entry, &mm->nodes); 224 list_add_tail(&node->nl_entry, &mm->nodes);
149 list_add_tail(&node->fl_entry, &mm->free); 225 list_add_tail(&node->fl_entry, &mm->free);
150 mm->heap_nodes++; 226 mm->heap_nodes++;
227 mm->heap_size += length;
151 return 0; 228 return 0;
152} 229}
153 230
@@ -159,15 +236,8 @@ nouveau_mm_fini(struct nouveau_mm *mm)
159 int nodes = 0; 236 int nodes = 0;
160 237
161 list_for_each_entry(node, &mm->nodes, nl_entry) { 238 list_for_each_entry(node, &mm->nodes, nl_entry) {
162 if (nodes++ == mm->heap_nodes) { 239 if (nodes++ == mm->heap_nodes)
163 printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
164 list_for_each_entry(node, &mm->nodes, nl_entry) {
165 printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
166 node->type, node->offset, node->length);
167 }
168 WARN_ON(1);
169 return -EBUSY; 240 return -EBUSY;
170 }
171 } 241 }
172 242
173 kfree(heap); 243 kfree(heap);
diff --git a/drivers/gpu/drm/nouveau/core/core/namedb.c b/drivers/gpu/drm/nouveau/core/core/namedb.c
new file mode 100644
index 000000000000..1ce95a8709df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/namedb.c
@@ -0,0 +1,203 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/namedb.h>
27#include <core/handle.h>
28#include <core/gpuobj.h>
29
30static struct nouveau_handle *
31nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name)
32{
33 struct nouveau_handle *handle;
34
35 list_for_each_entry(handle, &namedb->list, node) {
36 if (handle->name == name)
37 return handle;
38 }
39
40 return NULL;
41}
42
43static struct nouveau_handle *
44nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass)
45{
46 struct nouveau_handle *handle;
47
48 list_for_each_entry(handle, &namedb->list, node) {
49 if (nv_mclass(handle->object) == oclass)
50 return handle;
51 }
52
53 return NULL;
54}
55
56static struct nouveau_handle *
57nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst)
58{
59 struct nouveau_handle *handle;
60
61 list_for_each_entry(handle, &namedb->list, node) {
62 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
63 if (nv_gpuobj(handle->object)->addr == vinst)
64 return handle;
65 }
66 }
67
68 return NULL;
69}
70
71static struct nouveau_handle *
72nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst)
73{
74 struct nouveau_handle *handle;
75
76 list_for_each_entry(handle, &namedb->list, node) {
77 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
78 if (nv_gpuobj(handle->object)->node &&
79 nv_gpuobj(handle->object)->node->offset == cinst)
80 return handle;
81 }
82 }
83
84 return NULL;
85}
86
87int
88nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name,
89 struct nouveau_object *object,
90 struct nouveau_handle *handle)
91{
92 int ret = -EEXIST;
93 write_lock_irq(&namedb->lock);
94 if (!nouveau_namedb_lookup(namedb, name)) {
95 nouveau_object_ref(object, &handle->object);
96 handle->namedb = namedb;
97 list_add(&handle->node, &namedb->list);
98 ret = 0;
99 }
100 write_unlock_irq(&namedb->lock);
101 return ret;
102}
103
104void
105nouveau_namedb_remove(struct nouveau_handle *handle)
106{
107 struct nouveau_namedb *namedb = handle->namedb;
108 struct nouveau_object *object = handle->object;
109 write_lock_irq(&namedb->lock);
110 list_del(&handle->node);
111 write_unlock_irq(&namedb->lock);
112 nouveau_object_ref(NULL, &object);
113}
114
115struct nouveau_handle *
116nouveau_namedb_get(struct nouveau_namedb *namedb, u32 name)
117{
118 struct nouveau_handle *handle;
119 read_lock(&namedb->lock);
120 handle = nouveau_namedb_lookup(namedb, name);
121 if (handle == NULL)
122 read_unlock(&namedb->lock);
123 return handle;
124}
125
126struct nouveau_handle *
127nouveau_namedb_get_class(struct nouveau_namedb *namedb, u16 oclass)
128{
129 struct nouveau_handle *handle;
130 read_lock(&namedb->lock);
131 handle = nouveau_namedb_lookup_class(namedb, oclass);
132 if (handle == NULL)
133 read_unlock(&namedb->lock);
134 return handle;
135}
136
137struct nouveau_handle *
138nouveau_namedb_get_vinst(struct nouveau_namedb *namedb, u64 vinst)
139{
140 struct nouveau_handle *handle;
141 read_lock(&namedb->lock);
142 handle = nouveau_namedb_lookup_vinst(namedb, vinst);
143 if (handle == NULL)
144 read_unlock(&namedb->lock);
145 return handle;
146}
147
148struct nouveau_handle *
149nouveau_namedb_get_cinst(struct nouveau_namedb *namedb, u32 cinst)
150{
151 struct nouveau_handle *handle;
152 read_lock(&namedb->lock);
153 handle = nouveau_namedb_lookup_cinst(namedb, cinst);
154 if (handle == NULL)
155 read_unlock(&namedb->lock);
156 return handle;
157}
158
159void
160nouveau_namedb_put(struct nouveau_handle *handle)
161{
162 if (handle)
163 read_unlock(&handle->namedb->lock);
164}
165
166int
167nouveau_namedb_create_(struct nouveau_object *parent,
168 struct nouveau_object *engine,
169 struct nouveau_oclass *oclass, u32 pclass,
170 struct nouveau_oclass *sclass, u32 engcls,
171 int length, void **pobject)
172{
173 struct nouveau_namedb *namedb;
174 int ret;
175
176 ret = nouveau_parent_create_(parent, engine, oclass, pclass |
177 NV_NAMEDB_CLASS, sclass, engcls,
178 length, pobject);
179 namedb = *pobject;
180 if (ret)
181 return ret;
182
183 rwlock_init(&namedb->lock);
184 INIT_LIST_HEAD(&namedb->list);
185 return 0;
186}
187
188int
189_nouveau_namedb_ctor(struct nouveau_object *parent,
190 struct nouveau_object *engine,
191 struct nouveau_oclass *oclass, void *data, u32 size,
192 struct nouveau_object **pobject)
193{
194 struct nouveau_namedb *object;
195 int ret;
196
197 ret = nouveau_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
198 *pobject = nv_object(object);
199 if (ret)
200 return ret;
201
202 return 0;
203}
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
new file mode 100644
index 000000000000..0daab62ea14c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -0,0 +1,468 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/parent.h>
27#include <core/namedb.h>
28#include <core/handle.h>
29#include <core/engine.h>
30
#ifdef NOUVEAU_OBJECT_MAGIC
/* debug-only registry of every live object, dumped by nouveau_object_debug() */
static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
static DEFINE_SPINLOCK(_objlist_lock);
#endif
35
/* Allocate and initialise the common nouveau_object header at the front
 * of a 'size'-byte subclass structure.  Takes references on 'parent' and
 * 'engine'; refcount starts at 1 (the creator's reference), usecount at 0.
 */
int
nouveau_object_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, u32 pclass,
		       int size, void **pobject)
{
	struct nouveau_object *object;

	object = *pobject = kzalloc(size, GFP_KERNEL);
	if (!object)
		return -ENOMEM;

	nouveau_object_ref(parent, &object->parent);
	nouveau_object_ref(engine, &object->engine);
	object->oclass = oclass;
	/* NOTE(review): this ORs pclass into the *shared* oclass struct,
	 * not a per-object copy — presumably every instance of the class
	 * is meant to carry the same class bits; confirm this is intended */
	object->oclass->handle |= pclass;
	atomic_set(&object->refcount, 1);
	atomic_set(&object->usecount, 0);

#ifdef NOUVEAU_OBJECT_MAGIC
	object->_magic = NOUVEAU_OBJECT_MAGIC;
	spin_lock(&_objlist_lock);
	list_add(&object->list, &_objlist);
	spin_unlock(&_objlist_lock);
#endif
	return 0;
}
63
64static int
65_nouveau_object_ctor(struct nouveau_object *parent,
66 struct nouveau_object *engine,
67 struct nouveau_oclass *oclass, void *data, u32 size,
68 struct nouveau_object **pobject)
69{
70 struct nouveau_object *object;
71 int ret;
72
73 ret = nouveau_object_create(parent, engine, oclass, 0, &object);
74 *pobject = nv_object(object);
75 if (ret)
76 return ret;
77
78 return 0;
79}
80
/* Tear down the common object header: unregister from the debug list,
 * drop the engine and parent references taken at creation, and free the
 * allocation.  Counterpart of nouveau_object_create_().
 */
void
nouveau_object_destroy(struct nouveau_object *object)
{
#ifdef NOUVEAU_OBJECT_MAGIC
	spin_lock(&_objlist_lock);
	list_del(&object->list);
	spin_unlock(&_objlist_lock);
#endif
	nouveau_object_ref(NULL, &object->engine);
	nouveau_object_ref(NULL, &object->parent);
	kfree(object);
}
93
/* Default ofuncs dtor: nothing beyond the common header to release. */
static void
_nouveau_object_dtor(struct nouveau_object *object)
{
	nouveau_object_destroy(object);
}
99
/* Base-class init: no hardware state to bring up, always succeeds. */
int
nouveau_object_init(struct nouveau_object *object)
{
	return 0;
}
105
/* Default ofuncs init hook, forwards to the base implementation. */
static int
_nouveau_object_init(struct nouveau_object *object)
{
	return nouveau_object_init(object);
}
111
/* Base-class fini: no hardware state to tear down, always succeeds. */
int
nouveau_object_fini(struct nouveau_object *object, bool suspend)
{
	return 0;
}
117
/* Default ofuncs fini hook, forwards to the base implementation. */
static int
_nouveau_object_fini(struct nouveau_object *object, bool suspend)
{
	return nouveau_object_fini(object, suspend);
}
123
/* Vtable for the plain base object class; subclasses override entries. */
struct nouveau_ofuncs
nouveau_object_ofuncs = {
	.ctor = _nouveau_object_ctor,
	.dtor = _nouveau_object_dtor,
	.init = _nouveau_object_init,
	.fini = _nouveau_object_fini,
};
131
/* Construct an object of class 'oclass' via its ofuncs ctor, logging
 * failures and cleaning up any partially-built object.  On error
 * *pobject is guaranteed NULL; -ENODEV failures are logged quietly
 * since they indicate "not present on this chipset" rather than a bug.
 */
int
nouveau_object_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
	int ret;

	*pobject = NULL;

	ret = ofuncs->ctor(parent, engine, oclass, data, size, pobject);
	if (ret < 0) {
		if (ret != -ENODEV) {
			nv_error(parent, "failed to create 0x%08x, %d\n",
				 oclass->handle, ret);
		}

		/* ctor may have allocated before failing; destroy leftovers */
		if (*pobject) {
			ofuncs->dtor(*pobject);
			*pobject = NULL;
		}

		return ret;
	}

	nv_debug(*pobject, "created\n");
	return 0;
}
161
/* Invoked when the last reference is dropped; dispatches to the class dtor. */
static void
nouveau_object_dtor(struct nouveau_object *object)
{
	nv_debug(object, "destroying\n");
	nv_ofuncs(object)->dtor(object);
}
168
/* Repoint '*ref' at 'obj' with reference accounting: 'obj' (if non-NULL)
 * gains a reference, and the object previously in '*ref' (if non-NULL)
 * loses one, being destroyed when its count reaches zero.  Call with
 * obj == NULL to simply drop a reference.
 */
void
nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
{
	if (obj) {
		atomic_inc(&obj->refcount);
		nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount));
	}

	if (*ref) {
		int dead = atomic_dec_and_test(&(*ref)->refcount);
		/* the re-read below can race with other holders; the value
		 * is for tracing only and does not affect the dead check */
		nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
		if (dead)
			nouveau_object_dtor(*ref);
	}

	*ref = obj;
}
186
/* Create a new object of class '_oclass' under parent handle '_parent',
 * bind it to handle '_handle', and initialise it.  Steps: resolve and
 * validate the parent, resolve the class and owning engine, bring the
 * engine up, optionally interpose an engine context object, construct
 * the object, then create and init its handle.
 *
 * NOTE(review): on *success* control deliberately falls through the
 * fail_* labels below, dropping every reference/usecount taken in this
 * function — presumably the handle created by nouveau_handle_create()
 * holds its own references keeping the object alive; confirm there.
 */
int
nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle,
		   u16 _oclass, void *data, u32 size,
		   struct nouveau_object **pobject)
{
	struct nouveau_object *parent = NULL;
	struct nouveau_object *engctx = NULL;
	struct nouveau_object *object = NULL;
	struct nouveau_object *engine;
	struct nouveau_oclass *oclass;
	struct nouveau_handle *handle;
	int ret;

	/* lookup parent object and ensure it *is* a parent */
	parent = nouveau_handle_ref(client, _parent);
	if (!parent) {
		nv_error(client, "parent 0x%08x not found\n", _parent);
		return -ENOENT;
	}

	if (!nv_iclass(parent, NV_PARENT_CLASS)) {
		nv_error(parent, "cannot have children\n");
		ret = -EINVAL;
		goto fail_class;
	}

	/* check that parent supports the requested subclass */
	ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
	if (ret) {
		nv_debug(parent, "illegal class 0x%04x\n", _oclass);
		goto fail_class;
	}

	/* make sure engine init has been completed *before* any objects
	 * it controls are created - the constructors may depend on
	 * state calculated at init (ie. default context construction)
	 */
	if (engine) {
		ret = nouveau_object_inc(engine);
		if (ret)
			goto fail_class;
	}

	/* if engine requires it, create a context object to insert
	 * between the parent and its children (eg. PGRAPH context)
	 */
	if (engine && nv_engine(engine)->cclass) {
		ret = nouveau_object_ctor(parent, engine,
					  nv_engine(engine)->cclass,
					  data, size, &engctx);
		if (ret)
			goto fail_engctx;
	} else {
		nouveau_object_ref(parent, &engctx);
	}

	/* finally, create new object and bind it to its handle */
	ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
	*pobject = object;
	if (ret)
		goto fail_ctor;

	ret = nouveau_object_inc(object);
	if (ret)
		goto fail_init;

	ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
	if (ret)
		goto fail_handle;

	ret = nouveau_handle_init(handle);
	if (ret)
		nouveau_handle_destroy(handle);

	/* success falls through: the locals' refs are no longer needed */
fail_handle:
	nouveau_object_dec(object, false);
fail_init:
	nouveau_object_ref(NULL, &object);
fail_ctor:
	nouveau_object_ref(NULL, &engctx);
fail_engctx:
	if (engine)
		nouveau_object_dec(engine, false);
fail_class:
	nouveau_object_ref(NULL, &parent);
	return ret;
}
274
275int
276nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
277{
278 struct nouveau_object *parent = NULL;
279 struct nouveau_object *namedb = NULL;
280 struct nouveau_handle *handle = NULL;
281 int ret = -EINVAL;
282
283 parent = nouveau_handle_ref(client, _parent);
284 if (!parent)
285 return -ENOENT;
286
287 namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
288 if (namedb) {
289 handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
290 if (handle) {
291 nouveau_namedb_put(handle);
292 nouveau_handle_fini(handle, false);
293 nouveau_handle_destroy(handle);
294 }
295 }
296
297 nouveau_object_ref(NULL, &parent);
298 return ret;
299}
300
/* Increment the object's use count, performing first-use initialisation:
 * when the count goes 0 -> 1 the parent and engine are inc'd first and
 * then the class init hook runs.  On any failure all partial work is
 * rolled back and the usecount restored.  Returns 0 or a -errno.
 */
int
nouveau_object_inc(struct nouveau_object *object)
{
	int ref = atomic_add_return(1, &object->usecount);
	int ret;

	nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount));
	if (ref != 1)
		return 0;	/* already initialised by an earlier user */

	nv_trace(object, "initialising...\n");
	if (object->parent) {
		ret = nouveau_object_inc(object->parent);
		if (ret) {
			nv_error(object, "parent failed, %d\n", ret);
			goto fail_parent;
		}
	}

	/* engine usecount transitions are serialised under its mutex */
	if (object->engine) {
		mutex_lock(&nv_subdev(object->engine)->mutex);
		ret = nouveau_object_inc(object->engine);
		mutex_unlock(&nv_subdev(object->engine)->mutex);
		if (ret) {
			nv_error(object, "engine failed, %d\n", ret);
			goto fail_engine;
		}
	}

	ret = nv_ofuncs(object)->init(object);
	if (ret) {
		nv_error(object, "init failed, %d\n", ret);
		goto fail_self;
	}

	nv_debug(object, "initialised\n");
	return 0;

	/* unwind in reverse order of acquisition */
fail_self:
	if (object->engine) {
		mutex_lock(&nv_subdev(object->engine)->mutex);
		nouveau_object_dec(object->engine, false);
		mutex_unlock(&nv_subdev(object->engine)->mutex);
	}
fail_engine:
	if (object->parent)
		nouveau_object_dec(object->parent, false);
fail_parent:
	atomic_dec(&object->usecount);
	return ret;
}
352
/* Final-stop path for nouveau_object_dec(suspend=false): run the class
 * fini hook (failure only warned, teardown continues) then drop the
 * engine and parent use counts.  Always returns 0.
 */
static int
nouveau_object_decf(struct nouveau_object *object)
{
	int ret;

	nv_trace(object, "stopping...\n");

	ret = nv_ofuncs(object)->fini(object, false);
	if (ret)
		nv_warn(object, "failed fini, %d\n", ret);

	if (object->engine) {
		mutex_lock(&nv_subdev(object->engine)->mutex);
		nouveau_object_dec(object->engine, false);
		mutex_unlock(&nv_subdev(object->engine)->mutex);
	}

	if (object->parent)
		nouveau_object_dec(object->parent, false);

	nv_debug(object, "stopped\n");
	return 0;
}
376
/* Suspend path for nouveau_object_dec(suspend=true): fini self, then
 * engine, then parent.  Unlike the final-stop path, a failure at any
 * stage aborts the suspend and re-initialises the stages already
 * suspended so the object is left running.  Returns 0 or the first
 * failing -errno.
 */
static int
nouveau_object_decs(struct nouveau_object *object)
{
	int ret, rret;

	nv_trace(object, "suspending...\n");

	ret = nv_ofuncs(object)->fini(object, true);
	if (ret) {
		nv_error(object, "failed suspend, %d\n", ret);
		return ret;
	}

	if (object->engine) {
		mutex_lock(&nv_subdev(object->engine)->mutex);
		ret = nouveau_object_dec(object->engine, true);
		mutex_unlock(&nv_subdev(object->engine)->mutex);
		if (ret) {
			nv_warn(object, "engine failed suspend, %d\n", ret);
			goto fail_engine;
		}
	}

	if (object->parent) {
		ret = nouveau_object_dec(object->parent, true);
		if (ret) {
			nv_warn(object, "parent failed suspend, %d\n", ret);
			goto fail_parent;
		}
	}

	nv_debug(object, "suspended\n");
	return 0;

	/* rollback: bring already-suspended pieces back up; a failure
	 * here is unrecoverable and only logged as fatal */
fail_parent:
	if (object->engine) {
		mutex_lock(&nv_subdev(object->engine)->mutex);
		rret = nouveau_object_inc(object->engine);
		mutex_unlock(&nv_subdev(object->engine)->mutex);
		if (rret)
			nv_fatal(object, "engine failed to reinit, %d\n", rret);
	}

fail_engine:
	rret = nv_ofuncs(object)->init(object);
	if (rret)
		nv_fatal(object, "failed to reinit, %d\n", rret);

	return ret;
}
427
/* Decrement the object's use count; when it reaches zero either stop
 * (suspend=false) or suspend (suspend=true) the object.  If that fails
 * the usecount is restored so the object remains "in use".
 */
int
nouveau_object_dec(struct nouveau_object *object, bool suspend)
{
	int ref = atomic_add_return(-1, &object->usecount);
	int ret;

	nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount));

	if (ref == 0) {
		if (suspend)
			ret = nouveau_object_decs(object);
		else
			ret = nouveau_object_decf(object);

		if (ret) {
			/* teardown failed: keep the object marked active */
			atomic_inc(&object->usecount);
			return ret;
		}
	}

	return 0;
}
450
/* Debug-build leak check: dump every object still present in the global
 * registry.  Intended to be called at driver teardown.
 * NOTE(review): _objlist is walked without taking _objlist_lock —
 * presumably no objects can be created/destroyed concurrently at the
 * point this runs; confirm at the call site.
 */
void
nouveau_object_debug(void)
{
#ifdef NOUVEAU_OBJECT_MAGIC
	struct nouveau_object *object;
	if (!list_empty(&_objlist)) {
		nv_fatal(NULL, "*******************************************\n");
		nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
		nv_fatal(NULL, "*******************************************\n");
		list_for_each_entry(object, &_objlist, list) {
			nv_fatal(object, "%p/%p/%d/%d\n",
				 object->parent, object->engine,
				 atomic_read(&object->refcount),
				 atomic_read(&object->usecount));
		}
	}
#endif
}
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c
new file mode 100644
index 000000000000..62a432ea39e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/option.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/option.h>
26#include <core/debug.h>
27
/* Compare the unterminated string 'str' of length 'len' against the
 * zero-terminated string 'cmp', case-insensitively.  Returns 0 iff the
 * strings are equal; non-zero otherwise (callers only test zero-ness).
 */
static inline int
strncasecmpz(const char *str, const char *cmp, size_t len)
{
	/* bug fix: the old 'return len' reported *equality* (0) for any
	 * length mismatch when len == 0, e.g. "" vs "abc" */
	if (strlen(cmp) != len)
		return len ? (int)len : 1;
	return strncasecmp(str, cmp, len);
}
36
/* Scan a comma-separated "key=value,key=value" option string for 'opt'.
 * On a match, returns a pointer to the (unterminated) value and stores
 * its length in *arglen; returns NULL if the option is absent or has an
 * empty value.
 */
const char *
nouveau_stropt(const char *optstr, const char *opt, int *arglen)
{
	while (optstr && *optstr != '\0') {
		/* len = span of the current token up to the next ',' or '=' */
		int len = strcspn(optstr, ",=");
		switch (optstr[len]) {
		case '=':
			if (!strncasecmpz(optstr, opt, len)) {
				optstr += len + 1;	/* skip key and '=' */
				*arglen = strcspn(optstr, ",=");
				return *arglen ? optstr : NULL;
			}
			optstr++;	/* step past '=' (with += len below) */
			break;
		case ',':
			optstr++;	/* step past ',' (with += len below) */
			break;
		default:
			break;	/* '\0': final token, loop will terminate */
		}
		optstr += len;
	}

	return NULL;
}
62
63bool
64nouveau_boolopt(const char *optstr, const char *opt, bool value)
65{
66 int arglen;
67
68 optstr = nouveau_stropt(optstr, opt, &arglen);
69 if (optstr) {
70 if (!strncasecmpz(optstr, "0", arglen) ||
71 !strncasecmpz(optstr, "no", arglen) ||
72 !strncasecmpz(optstr, "off", arglen) ||
73 !strncasecmpz(optstr, "false", arglen))
74 value = false;
75 else
76 if (!strncasecmpz(optstr, "1", arglen) ||
77 !strncasecmpz(optstr, "yes", arglen) ||
78 !strncasecmpz(optstr, "on", arglen) ||
79 !strncasecmpz(optstr, "true", arglen))
80 value = true;
81 }
82
83 return value;
84}
85
/* Determine the debug level for subdev 'sub' from an option string of
 * the form "level" or "subdev=level,subdev=level,...".  A bare level
 * applies to everything; 'mode' tracks whether the value that follows
 * an '=' belongs to the requested subdev.  Returns the parsed level or
 * CONFIG_NOUVEAU_DEBUG_DEFAULT.
 */
int
nouveau_dbgopt(const char *optstr, const char *sub)
{
	int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT;

	while (optstr) {
		int len = strcspn(optstr, ",=");
		switch (optstr[len]) {
		case '=':
			/* token is a subdev name; value applies only if it
			 * matches 'sub' */
			if (strncasecmpz(optstr, sub, len))
				mode = 0;
			optstr++;
			break;
		default:
			if (mode) {
				if (!strncasecmpz(optstr, "fatal", len))
					level = NV_DBG_FATAL;
				else if (!strncasecmpz(optstr, "error", len))
					level = NV_DBG_ERROR;
				else if (!strncasecmpz(optstr, "warn", len))
					level = NV_DBG_WARN;
				else if (!strncasecmpz(optstr, "info", len))
					level = NV_DBG_INFO;
				else if (!strncasecmpz(optstr, "debug", len))
					level = NV_DBG_DEBUG;
				else if (!strncasecmpz(optstr, "trace", len))
					level = NV_DBG_TRACE;
				else if (!strncasecmpz(optstr, "paranoia", len))
					level = NV_DBG_PARANOIA;
				else if (!strncasecmpz(optstr, "spam", len))
					level = NV_DBG_SPAM;
			}

			/* more tokens follow: skip ',' and reset match mode */
			if (optstr[len] != '\0') {
				optstr++;
				mode = 1;
				break;
			}

			return level;	/* last token consumed */
		}
		optstr += len;
	}

	return level;
}
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
new file mode 100644
index 000000000000..a1ea034611d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/parent.h>
27
/* Resolve class id 'handle' (low 16 bits of an oclass handle) beneath
 * 'parent'.  First searches the parent's explicit subclass list, then
 * the class tables of every engine enabled in the parent's engine mask.
 * On success stores the owning engine (may inherit parent->engine) and
 * the oclass; returns -EINVAL when the class is not supported here.
 */
int
nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
		      struct nouveau_object **pengine,
		      struct nouveau_oclass **poclass)
{
	struct nouveau_sclass *sclass;
	struct nouveau_engine *engine;
	struct nouveau_oclass *oclass;
	u64 mask;

	sclass = nv_parent(parent)->sclass;
	while (sclass) {
		if ((sclass->oclass->handle & 0xffff) == handle) {
			*pengine = parent->engine;
			*poclass = sclass->oclass;
			return 0;
		}

		sclass = sclass->sclass;
	}

	/* walk each engine flagged in the mask, lowest bit first */
	mask = nv_parent(parent)->engine;
	while (mask) {
		int i = ffsll(mask) - 1;

		if ((engine = nouveau_engine(parent, i))) {
			oclass = engine->sclass;
			/* engine class tables end with a zeroed sentinel */
			while (oclass->ofuncs) {
				if ((oclass->handle & 0xffff) == handle) {
					*pengine = nv_object(engine);
					*poclass = oclass;
					return 0;
				}
				oclass++;
			}
		}

		mask &= ~(1ULL << i);
	}

	return -EINVAL;
}
70
/* Backend constructor for parent-capable objects: builds the base object
 * (tagged NV_PARENT_CLASS), registers each entry of the 'sclass' table
 * (terminated by a zeroed ofuncs sentinel) onto the subclass list, and
 * records the supported-engine bitmask 'engcls'.
 * On mid-loop allocation failure the partial list is left on the object
 * and freed by nouveau_parent_destroy() via the caller's dtor path.
 */
int
nouveau_parent_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, u32 pclass,
		       struct nouveau_oclass *sclass, u64 engcls,
		       int size, void **pobject)
{
	struct nouveau_parent *object;
	struct nouveau_sclass *nclass;
	int ret;

	ret = nouveau_object_create_(parent, engine, oclass, pclass |
				     NV_PARENT_CLASS, size, pobject);
	object = *pobject;
	if (ret)
		return ret;

	while (sclass && sclass->ofuncs) {
		nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
		if (!nclass)
			return -ENOMEM;

		/* push onto the head of the singly-linked subclass list */
		nclass->sclass = object->sclass;
		object->sclass = nclass;
		nclass->engine = engine ? nv_engine(engine) : NULL;
		nclass->oclass = sclass;
		sclass++;
	}

	object->engine = engcls;
	return 0;
}
103
104int
105_nouveau_parent_ctor(struct nouveau_object *parent,
106 struct nouveau_object *engine,
107 struct nouveau_oclass *oclass, void *data, u32 size,
108 struct nouveau_object **pobject)
109{
110 struct nouveau_parent *object;
111 int ret;
112
113 ret = nouveau_parent_create(parent, engine, oclass, 0, NULL, 0, &object);
114 *pobject = nv_object(object);
115 if (ret)
116 return ret;
117
118 return 0;
119}
120
121void
122nouveau_parent_destroy(struct nouveau_parent *parent)
123{
124 struct nouveau_sclass *sclass;
125
126 while ((sclass = parent->sclass)) {
127 parent->sclass = sclass->sclass;
128 kfree(sclass);
129 }
130
131 nouveau_object_destroy(&parent->base);
132}
133
134
/* Default ofuncs dtor for parent objects. */
void
_nouveau_parent_dtor(struct nouveau_object *object)
{
	nouveau_parent_destroy(nv_parent(object));
}
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
new file mode 100644
index 000000000000..6161eaf5447c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/client.h>
27#include <core/subdev.h>
28#include <core/printk.h>
29
/* Core logging backend for the nv_*() macros.  Builds a prefix that
 * identifies the message's origin — subdev/device names for hardware
 * objects, client name for client objects, bare "nouveau:" otherwise —
 * and drops the message if 'level' exceeds the origin's debug setting.
 * 'fmt' is spliced into a larger format string, so callers must never
 * pass untrusted data as 'fmt'.
 */
void
nv_printk_(struct nouveau_object *object, const char *pfx, int level,
	   const char *fmt, ...)
{
	/* one-letter tag per NV_DBG_* level, indexed by 'level' */
	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
	char mfmt[256];
	va_list args;

	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
		struct nouveau_object *device = object;
		struct nouveau_object *subdev = object;
		char obuf[64], *ofmt = "";

		/* engine-owned objects log under their engine's identity,
		 * with their own class/pointer appended */
		if (object->engine) {
			snprintf(obuf, sizeof(obuf), "[0x%08x][%p]",
				 nv_hclass(object), object);
			ofmt = obuf;
			subdev = object->engine;
			device = object->engine;
		}

		if (subdev->parent)
			device = subdev->parent;

		if (level > nv_subdev(subdev)->debug)
			return;

		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
			 name[level], nv_subdev(subdev)->name,
			 nv_device(device)->name, ofmt, fmt);
	} else
	if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
		if (level > nv_client(object)->debug)
			return;

		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
			 name[level], nv_client(object)->name, fmt);
	} else {
		snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
	}

	va_start(args, fmt);
	vprintk(mfmt, args);
	va_end(args);
}
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
new file mode 100644
index 000000000000..86a64045dd60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <core/object.h>
24#include <core/ramht.h>
25#include <core/math.h>
26
27#include <subdev/bar.h>
28
29static u32
30nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
31{
32 u32 hash = 0;
33
34 while (handle) {
35 hash ^= (handle & ((1 << ramht->bits) - 1));
36 handle >>= ramht->bits;
37 }
38
39 hash ^= chid << (ramht->bits - 4);
40 hash = hash << 3;
41 return hash;
42}
43
/* Insert a (handle, context) pair for channel 'chid' into the hash
 * table, linear-probing forward (with wraparound) from the hashed slot
 * until a free entry is found.  A slot is free when its context word
 * (offset +4) is zero.  Returns the byte offset of the entry used, or
 * -ENOMEM when the table is full.
 */
int
nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
		     u32 handle, u32 context)
{
	struct nouveau_bar *bar = nouveau_bar(ramht);
	u32 co, ho;

	co = ho = nouveau_ramht_hash(ramht, chid, handle);
	do {
		if (!nv_ro32(ramht, co + 4)) {
			nv_wo32(ramht, co + 0, handle);
			nv_wo32(ramht, co + 4, context);
			/* flush write-combined writes so hw sees the entry */
			if (bar)
				bar->flush(bar);
			return co;
		}

		co += 8;	/* next 8-byte entry, wrapping at table end */
		if (co >= nv_gpuobj(ramht)->size)
			co = 0;
	} while (co != ho);

	return -ENOMEM;
}
68
/* Clear the hash-table entry at byte offset 'cookie' (as returned by
 * nouveau_ramht_insert()), flushing so the hardware sees the removal.
 */
void
nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
{
	struct nouveau_bar *bar = nouveau_bar(ramht);
	nv_wo32(ramht, cookie + 0, 0x00000000);
	nv_wo32(ramht, cookie + 4, 0x00000000);
	if (bar)
		bar->flush(bar);
}
78
/* Class for the RAMHT gpuobj; never constructed through the generic
 * object path (hence .ctor = NULL, see nouveau_ramht_new() below), so
 * the handle value is an arbitrary placeholder.
 */
static struct nouveau_oclass
nouveau_ramht_oclass = {
	.handle = 0x0000abcd,
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = NULL,
		.dtor = _nouveau_gpuobj_dtor,
		.init = _nouveau_gpuobj_init,
		.fini = _nouveau_gpuobj_fini,
		.rd32 = _nouveau_gpuobj_rd32,
		.wr32 = _nouveau_gpuobj_wr32,
	},
};
91
/* Allocate a zeroed RAMHT gpuobj of 'size' bytes inside 'pargpu' and
 * derive the hash width from the entry count (size / 8 entries).
 */
int
nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
		  u32 size, u32 align, struct nouveau_ramht **pramht)
{
	struct nouveau_ramht *ramht;
	int ret;

	ret = nouveau_gpuobj_create(parent, parent->engine ?
				    parent->engine : parent, /* <nv50 ramht */
				    &nouveau_ramht_oclass, 0, pargpu, size,
				    align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
	*pramht = ramht;
	if (ret)
		return ret;

	/* bits = log2(number of 8-byte entries) */
	ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
new file mode 100644
index 000000000000..f74c30aa33a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/subdev.h>
27#include <core/device.h>
28#include <core/option.h>
29
/* Force a subdev back to its stopped state by invoking its fini hook
 * (non-suspend); any error from fini is deliberately ignored here.
 */
void
nouveau_subdev_reset(struct nouveau_object *subdev)
{
	nv_trace(subdev, "resetting...\n");
	nv_ofuncs(subdev)->fini(subdev, false);
	nv_debug(subdev, "reset\n");
}
37
38int
39nouveau_subdev_init(struct nouveau_subdev *subdev)
40{
41 int ret = nouveau_object_init(&subdev->base);
42 if (ret)
43 return ret;
44
45 nouveau_subdev_reset(&subdev->base);
46 return 0;
47}
48
/* Default ofuncs init hook for subdev-derived classes. */
int
_nouveau_subdev_init(struct nouveau_object *object)
{
	return nouveau_subdev_init(nv_subdev(object));
}
54
/* Shut down a subdev.  If the subdev owns bits in the master enable
 * register its unit is pulsed off/on first.
 * NOTE(review): 0x000200 appears to be the engine master-enable
 * register and the off/on toggle a hardware reset of the unit —
 * confirm against hw documentation.
 */
int
nouveau_subdev_fini(struct nouveau_subdev *subdev, bool suspend)
{
	if (subdev->unit) {
		nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
		nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
	}

	return nouveau_object_fini(&subdev->base, suspend);
}
65
/* Default ofuncs fini hook for subdev-derived classes. */
int
_nouveau_subdev_fini(struct nouveau_object *object, bool suspend)
{
	return nouveau_subdev_fini(nv_subdev(object), suspend);
}
71
/* Unregister the subdev from the device's subdev table (indexed by the
 * low byte of its class handle) and destroy the base object.
 */
void
nouveau_subdev_destroy(struct nouveau_subdev *subdev)
{
	int subidx = nv_hclass(subdev) & 0xff;
	nv_device(subdev)->subdev[subidx] = NULL;
	nouveau_object_destroy(&subdev->base);
}
79
/* Default ofuncs dtor for subdev-derived classes. */
void
_nouveau_subdev_dtor(struct nouveau_object *object)
{
	nouveau_subdev_destroy(nv_subdev(object));
}
85
/* Backend constructor for subdev subclasses: builds the base object
 * (tagged NV_SUBDEV_CLASS), sets up the mutex/name/debug level, and —
 * when a parent device exists — inherits its mmio mapping and registers
 * the subdev in the device's subdev table.
 * NOTE(review): the 'sysname' parameter is currently unused here.
 */
int
nouveau_subdev_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, u32 pclass,
		       const char *subname, const char *sysname,
		       int size, void **pobject)
{
	struct nouveau_subdev *subdev;
	int ret;

	ret = nouveau_object_create_(parent, engine, oclass, pclass |
				     NV_SUBDEV_CLASS, size, pobject);
	subdev = *pobject;
	if (ret)
		return ret;

	mutex_init(&subdev->mutex);
	subdev->name = subname;

	if (parent) {
		struct nouveau_device *device = nv_device(parent);
		/* table slot = low byte of the subdev's class handle */
		int subidx = nv_hclass(subdev) & 0xff;

		subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
		subdev->mmio  = nv_subdev(device)->mmio;
		device->subdev[subidx] = *pobject;
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
new file mode 100644
index 000000000000..66f7dfd907ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/bsp.h>
30
/* Engine-private state; nothing beyond the common BSP base yet. */
struct nv84_bsp_priv {
	struct nouveau_bsp base;
};

/* Per-channel context state; nothing beyond the common base yet. */
struct nv84_bsp_chan {
	struct nouveau_bsp_chan base;
};

/*******************************************************************************
 * BSP object classes
 ******************************************************************************/

/* No user object classes are exposed yet -- the engine is a stub. */
static struct nouveau_oclass
nv84_bsp_sclass[] = {
	{},
};
47
48/*******************************************************************************
49 * BSP context
50 ******************************************************************************/
51
52static int
53nv84_bsp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv84_bsp_chan *priv;
59 int ret;
60
61 ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv84_bsp_context_dtor(struct nouveau_object *object)
72{
73 struct nv84_bsp_chan *priv = (void *)object;
74 nouveau_bsp_context_destroy(&priv->base);
75}
76
77static int
78nv84_bsp_context_init(struct nouveau_object *object)
79{
80 struct nv84_bsp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_bsp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv84_bsp_chan *priv = (void *)object;
94 return nouveau_bsp_context_fini(&priv->base, suspend);
95}
96
/* Context class: wires up the per-channel ctor/dtor/init/fini above. */
static struct nouveau_oclass
nv84_bsp_cclass = {
	.handle = NV_ENGCTX(BSP, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_bsp_context_ctor,
		.dtor = nv84_bsp_context_dtor,
		.init = nv84_bsp_context_init,
		.fini = nv84_bsp_context_fini,
		.rd32 = _nouveau_bsp_context_rd32,
		.wr32 = _nouveau_bsp_context_wr32,
	},
};
109
110/*******************************************************************************
111 * BSP engine/subdev functions
112 ******************************************************************************/
113
/* Interrupt handler stub -- BSP interrupts are not decoded yet. */
static void
nv84_bsp_intr(struct nouveau_subdev *subdev)
{
}
118
/*
 * Construct the BSP engine object and hook up its classes.
 * unit selects this engine's bits in the master intr/enable registers;
 * NOTE(review): 0x04008000 is taken as-is from hw knowledge, not
 * derivable from this file.
 */
static int
nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nv84_bsp_priv *priv;
	int ret;

	ret = nouveau_bsp_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x04008000;
	nv_subdev(priv)->intr = nv84_bsp_intr;
	nv_engine(priv)->cclass = &nv84_bsp_cclass;
	nv_engine(priv)->sclass = nv84_bsp_sclass;
	return 0;
}
138
139static void
140nv84_bsp_dtor(struct nouveau_object *object)
141{
142 struct nv84_bsp_priv *priv = (void *)object;
143 nouveau_bsp_destroy(&priv->base);
144}
145
146static int
147nv84_bsp_init(struct nouveau_object *object)
148{
149 struct nv84_bsp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_bsp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv84_bsp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv84_bsp_priv *priv = (void *)object;
163 return nouveau_bsp_fini(&priv->base, suspend);
164}
165
/* Engine class: device-table entry point for nv84-family BSP. */
struct nouveau_oclass
nv84_bsp_oclass = {
	.handle = NV_ENGINE(BSP, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_bsp_ctor,
		.dtor = nv84_bsp_dtor,
		.init = nv84_bsp_init,
		.fini = nv84_bsp_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
index 219850d53286..219850d53286 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
index 37d6de3c9d61..c92520f3ed46 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
@@ -1,4 +1,4 @@
1u32 nva3_pcopy_data[] = { 1static u32 nva3_pcopy_data[] = {
2/* 0x0000: ctx_object */ 2/* 0x0000: ctx_object */
3 0x00000000, 3 0x00000000,
4/* 0x0004: ctx_dma */ 4/* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ u32 nva3_pcopy_data[] = {
183 0x00000800, 183 0x00000800,
184}; 184};
185 185
186u32 nva3_pcopy_code[] = { 186static u32 nva3_pcopy_code[] = {
187/* 0x0000: main */ 187/* 0x0000: main */
188 0x04fe04bd, 188 0x04fe04bd,
189 0x3517f000, 189 0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
index cd879f31bb38..0d98c6c0958d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
@@ -1,4 +1,4 @@
1u32 nvc0_pcopy_data[] = { 1static u32 nvc0_pcopy_data[] = {
2/* 0x0000: ctx_object */ 2/* 0x0000: ctx_object */
3 0x00000000, 3 0x00000000,
4/* 0x0004: ctx_query_address_high */ 4/* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ u32 nvc0_pcopy_data[] = {
171 0x00000800, 171 0x00000800,
172}; 172};
173 173
174u32 nvc0_pcopy_code[] = { 174static u32 nvc0_pcopy_code[] = {
175/* 0x0000: main */ 175/* 0x0000: main */
176 0x04fe04bd, 176 0x04fe04bd,
177 0x3517f000, 177 0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
new file mode 100644
index 000000000000..4df6da0af740
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -0,0 +1,222 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h>
29
30#include <subdev/fb.h>
31#include <subdev/vm.h>
32
33#include <engine/fifo.h>
34#include <engine/copy.h>
35
36#include "fuc/nva3.fuc.h"
37
/* Engine-private state; nothing beyond the common copy base yet. */
struct nva3_copy_priv {
	struct nouveau_copy base;
};

/* Per-channel context state; nothing beyond the common base yet. */
struct nva3_copy_chan {
	struct nouveau_copy_chan base;
};

/*******************************************************************************
 * Copy object classes
 ******************************************************************************/

/* User-visible class: 85b5 is the nva3 copy-engine object. */
static struct nouveau_oclass
nva3_copy_sclass[] = {
	{ 0x85b5, &nouveau_object_ofuncs },
	{}
};
55
56/*******************************************************************************
57 * PCOPY context
58 ******************************************************************************/
59
60static int
61nva3_copy_context_ctor(struct nouveau_object *parent,
62 struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 size,
64 struct nouveau_object **pobject)
65{
66 struct nva3_copy_chan *priv;
67 int ret;
68
69 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
70 NVOBJ_FLAG_ZERO_ALLOC, &priv);
71 *pobject = nv_object(priv);
72 if (ret)
73 return ret;
74
75 return 0;
76}
77
/* Context class for 85b5 channel contexts; only the ctor is nva3-specific. */
static struct nouveau_oclass
nva3_copy_cclass = {
	.handle = NV_ENGCTX(COPY0, 0xa3),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nva3_copy_context_ctor,
		.dtor = _nouveau_copy_context_dtor,
		.init = _nouveau_copy_context_init,
		.fini = _nouveau_copy_context_fini,
		.rd32 = _nouveau_copy_context_rd32,
		.wr32 = _nouveau_copy_context_wr32,
	},
};

/*******************************************************************************
 * PCOPY engine/subdev functions
 ******************************************************************************/

/* Names for the DISPATCH_ERROR status codes reported in 0x104040. */
static const struct nouveau_enum nva3_copy_isr_error_name[] = {
	{ 0x0001, "ILLEGAL_MTHD" },
	{ 0x0002, "INVALID_ENUM" },
	{ 0x0003, "INVALID_BITFIELD" },
	{}
};
102
/*
 * PCOPY interrupt handler: decode the falcon's dispatch-error state,
 * log DISPATCH_ERROR with channel/method/data detail, ack everything
 * that was pending, and resume any fb trap handling.
 */
static void
nva3_copy_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_object *engctx;
	struct nva3_copy_priv *priv = (void *)subdev;
	u32 dispatch = nv_rd32(priv, 0x10401c);
	/* only bits both pending and enabled (high half holds the masks) */
	u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
	u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
	u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
	u32 addr = nv_rd32(priv, 0x104040) >> 16;
	u32 mthd = (addr & 0x07ff) << 2;
	u32 subc = (addr & 0x3800) >> 11;
	u32 data = nv_rd32(priv, 0x104044);
	int chid;

	/* map the faulting instance back to a channel id for the log */
	engctx = nouveau_engctx_get(engine, inst);
	chid = pfifo->chid(pfifo, engctx);

	if (stat & 0x00000040) {
		nv_error(priv, "DISPATCH_ERROR [");
		nouveau_enum_print(nva3_copy_isr_error_name, ssta);
		/* NOTE(review): continuation printk without KERN_CONT may be
		 * split by concurrent log output */
		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
		       chid, inst << 12, subc, mthd, data);
		nv_wr32(priv, 0x104004, 0x00000040);
		stat &= ~0x00000040;
	}

	if (stat) {
		nv_error(priv, "unhandled intr 0x%08x\n", stat);
		nv_wr32(priv, 0x104004, stat);
	}

	nv50_fb_trap(nouveau_fb(priv), 1);
	nouveau_engctx_put(engctx);
}
140
/* Flush the VM TLBs for PCOPY (engine id 0x0d on the nv50-family VM). */
static int
nva3_copy_tlb_flush(struct nouveau_engine *engine)
{
	nv50_vm_flush_engine(&engine->base, 0x0d);
	return 0;
}
147
/*
 * Construct the PCOPY engine.  Reported disabled on chipset 0xaf
 * (NOTE(review): presumably PCOPY is absent/fused off there -- not
 * derivable from this file).
 */
static int
nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	bool enable = (nv_device(parent)->chipset != 0xaf);
	struct nva3_copy_priv *priv;
	int ret;

	ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00802000; /* PCOPY intr/enable bits */
	nv_subdev(priv)->intr = nva3_copy_intr;
	nv_engine(priv)->cclass = &nva3_copy_cclass;
	nv_engine(priv)->sclass = nva3_copy_sclass;
	nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
	return 0;
}
169
170static int
171nva3_copy_init(struct nouveau_object *object)
172{
173 struct nva3_copy_priv *priv = (void *)object;
174 int ret, i;
175
176 ret = nouveau_copy_init(&priv->base);
177 if (ret)
178 return ret;
179
180 /* disable all interrupts */
181 nv_wr32(priv, 0x104014, 0xffffffff);
182
183 /* upload ucode */
184 nv_wr32(priv, 0x1041c0, 0x01000000);
185 for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
186 nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
187
188 nv_wr32(priv, 0x104180, 0x01000000);
189 for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
190 if ((i & 0x3f) == 0)
191 nv_wr32(priv, 0x104188, i >> 6);
192 nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
193 }
194
195 /* start it running */
196 nv_wr32(priv, 0x10410c, 0x00000000);
197 nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
198 nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
199 return 0;
200}
201
/*
 * Halt PCOPY before the common fini: clear the low bits of 0x104048
 * (NOTE(review): presumably stops context switching/fifo -- confirm
 * against falcon docs) and mask all interrupts.
 */
static int
nva3_copy_fini(struct nouveau_object *object, bool suspend)
{
	struct nva3_copy_priv *priv = (void *)object;

	nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
	nv_wr32(priv, 0x104014, 0xffffffff);

	return nouveau_copy_fini(&priv->base, suspend);
}
212
/* Engine class: device-table entry point for nva3-family PCOPY. */
struct nouveau_oclass
nva3_copy_oclass = {
	.handle = NV_ENGINE(COPY0, 0xa3),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nva3_copy_ctor,
		.dtor = _nouveau_copy_dtor,
		.init = nva3_copy_init,
		.fini = nva3_copy_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
new file mode 100644
index 000000000000..06d4a8791055
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -0,0 +1,265 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h>
29
30#include <engine/fifo.h>
31#include <engine/copy.h>
32
33#include "fuc/nvc0.fuc.h"
34
/* Engine-private state; nothing beyond the common copy base yet. */
struct nvc0_copy_priv {
	struct nouveau_copy base;
};

/* Per-channel context state; nothing beyond the common base yet. */
struct nvc0_copy_chan {
	struct nouveau_copy_chan base;
};

/*******************************************************************************
 * Copy object classes
 ******************************************************************************/

/* User-visible class for the first copy engine (90b5). */
static struct nouveau_oclass
nvc0_copy0_sclass[] = {
	{ 0x90b5, &nouveau_object_ofuncs },
	{},
};

/* User-visible class for the second copy engine (90b8). */
static struct nouveau_oclass
nvc0_copy1_sclass[] = {
	{ 0x90b8, &nouveau_object_ofuncs },
	{},
};
58
59/*******************************************************************************
60 * PCOPY context
61 ******************************************************************************/
62
63static int
64nvc0_copy_context_ctor(struct nouveau_object *parent,
65 struct nouveau_object *engine,
66 struct nouveau_oclass *oclass, void *data, u32 size,
67 struct nouveau_object **pobject)
68{
69 struct nvc0_copy_chan *priv;
70 int ret;
71
72 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
73 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
74 *pobject = nv_object(priv);
75 if (ret)
76 return ret;
77
78 return 0;
79}
80
/* Context ofuncs shared by both copy engines; only the ctor is local. */
static struct nouveau_ofuncs
nvc0_copy_context_ofuncs = {
	.ctor = nvc0_copy_context_ctor,
	.dtor = _nouveau_copy_context_dtor,
	.init = _nouveau_copy_context_init,
	.fini = _nouveau_copy_context_fini,
	.rd32 = _nouveau_copy_context_rd32,
	.wr32 = _nouveau_copy_context_wr32,
};

/* Context class for COPY0 channel contexts. */
static struct nouveau_oclass
nvc0_copy0_cclass = {
	.handle = NV_ENGCTX(COPY0, 0xc0),
	.ofuncs = &nvc0_copy_context_ofuncs,
};

/* Context class for COPY1 channel contexts. */
static struct nouveau_oclass
nvc0_copy1_cclass = {
	.handle = NV_ENGCTX(COPY1, 0xc0),
	.ofuncs = &nvc0_copy_context_ofuncs,
};
102
103/*******************************************************************************
104 * PCOPY engine/subdev functions
105 ******************************************************************************/
106
/* Names for the DISPATCH_ERROR status codes reported in 0x104040+. */
static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
	{ 0x0001, "ILLEGAL_MTHD" },
	{ 0x0002, "INVALID_ENUM" },
	{ 0x0003, "INVALID_BITFIELD" },
	{}
};
113
/*
 * Shared interrupt handler for COPY0/COPY1.  Each engine's registers
 * live at a 0x1000 stride, indexed by the engine's position relative
 * to NVDEV_ENGINE_COPY0.  Decodes and logs DISPATCH_ERROR, acks all
 * pending status.
 */
static void
nvc0_copy_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_object *engctx;
	int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
	struct nvc0_copy_priv *priv = (void *)subdev;
	u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
	u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
	/* only bits both pending and enabled (high half holds the masks) */
	u32 stat = intr & disp & ~(disp >> 16);
	u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
	u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
	u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
	u32 mthd = (addr & 0x07ff) << 2;
	u32 subc = (addr & 0x3800) >> 11;
	u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
	int chid;

	/* map the faulting instance back to a channel id for the log */
	engctx = nouveau_engctx_get(engine, inst);
	chid = pfifo->chid(pfifo, engctx);

	if (stat & 0x00000040) {
		nv_error(priv, "DISPATCH_ERROR [");
		nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
		/* NOTE(review): (u64) cast is redundant -- inst is already
		 * u64; continuation printk lacks KERN_CONT */
		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
		       chid, (u64)inst << 12, subc, mthd, data);
		nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
		stat &= ~0x00000040;
	}

	if (stat) {
		nv_error(priv, "unhandled intr 0x%08x\n", stat);
		nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
	}

	nouveau_engctx_put(engctx);
}
152
/*
 * Construct the COPY0 engine.  Bit 8 of 0x022500 marks the unit as
 * absent/disabled in hw (NOTE(review): register meaning taken from hw
 * knowledge, not derivable here).
 */
static int
nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nvc0_copy_priv *priv;
	int ret;

	if (nv_rd32(parent, 0x022500) & 0x00000100)
		return -ENODEV;

	ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000040; /* COPY0 intr/enable bit */
	nv_subdev(priv)->intr = nvc0_copy_intr;
	nv_engine(priv)->cclass = &nvc0_copy0_cclass;
	nv_engine(priv)->sclass = nvc0_copy0_sclass;
	return 0;
}
175
/*
 * Construct the COPY1 engine.  Bit 9 of 0x022500 marks the unit as
 * absent/disabled in hw (NOTE(review): register meaning taken from hw
 * knowledge, not derivable here).
 */
static int
nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nvc0_copy_priv *priv;
	int ret;

	if (nv_rd32(parent, 0x022500) & 0x00000200)
		return -ENODEV;

	ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000080; /* COPY1 intr/enable bit */
	nv_subdev(priv)->intr = nvc0_copy_intr;
	nv_engine(priv)->cclass = &nvc0_copy1_cclass;
	nv_engine(priv)->sclass = nvc0_copy1_sclass;
	return 0;
}
198
199static int
200nvc0_copy_init(struct nouveau_object *object)
201{
202 int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
203 struct nvc0_copy_priv *priv = (void *)object;
204 int ret, i;
205
206 ret = nouveau_copy_init(&priv->base);
207 if (ret)
208 return ret;
209
210 /* disable all interrupts */
211 nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
212
213 /* upload ucode */
214 nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
215 for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
216 nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
217
218 nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
219 for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
220 if ((i & 0x3f) == 0)
221 nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
222 nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
223 }
224
225 /* start it running */
226 nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
227 nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
228 nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
229 nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
230 return 0;
231}
232
/*
 * Halt one PCOPY falcon before the common fini: clear the low bits of
 * 0x104048 (NOTE(review): presumably stops context switching/fifo --
 * confirm against falcon docs) and mask all interrupts.
 */
static int
nvc0_copy_fini(struct nouveau_object *object, bool suspend)
{
	int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
	struct nvc0_copy_priv *priv = (void *)object;

	nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
	nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);

	return nouveau_copy_fini(&priv->base, suspend);
}
244
/* Engine class: device-table entry point for nvc0 COPY0. */
struct nouveau_oclass
nvc0_copy0_oclass = {
	.handle = NV_ENGINE(COPY0, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_copy0_ctor,
		.dtor = _nouveau_copy_dtor,
		.init = nvc0_copy_init,
		.fini = nvc0_copy_fini,
	},
};

/* Engine class: device-table entry point for nvc0 COPY1. */
struct nouveau_oclass
nvc0_copy1_oclass = {
	.handle = NV_ENGINE(COPY1, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_copy1_ctor,
		.dtor = _nouveau_copy_dtor,
		.init = nvc0_copy_init,
		.fini = nvc0_copy_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
new file mode 100644
index 000000000000..2017c1579ac5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h>
29
30#include <engine/copy.h>
31
/* Engine-private state; nothing beyond the common copy base yet. */
struct nve0_copy_priv {
	struct nouveau_copy base;
};

/* Per-channel context state; nothing beyond the common base yet. */
struct nve0_copy_chan {
	struct nouveau_copy_chan base;
};

/*******************************************************************************
 * Copy object classes
 ******************************************************************************/

/* User-visible class shared by both nve0 copy engines (a0b5). */
static struct nouveau_oclass
nve0_copy_sclass[] = {
	{ 0xa0b5, &nouveau_object_ofuncs },
	{},
};
49
50/*******************************************************************************
51 * PCOPY context
52 ******************************************************************************/
53
54static int
55nve0_copy_context_ctor(struct nouveau_object *parent,
56 struct nouveau_object *engine,
57 struct nouveau_oclass *oclass, void *data, u32 size,
58 struct nouveau_object **pobject)
59{
60 struct nve0_copy_chan *priv;
61 int ret;
62
63 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
64 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
65 *pobject = nv_object(priv);
66 if (ret)
67 return ret;
68
69 return 0;
70}
71
/* Context ofuncs shared by both copy engines; only the ctor is local. */
static struct nouveau_ofuncs
nve0_copy_context_ofuncs = {
	.ctor = nve0_copy_context_ctor,
	.dtor = _nouveau_copy_context_dtor,
	.init = _nouveau_copy_context_init,
	.fini = _nouveau_copy_context_fini,
	.rd32 = _nouveau_copy_context_rd32,
	.wr32 = _nouveau_copy_context_wr32,
};

/*
 * Context class shared by COPY0 and COPY1.
 * NOTE(review): the handle says chipset 0xc0 while the engine classes
 * below use 0xe0 -- looks like a copy/paste leftover from nvc0.c;
 * confirm whether the engctx handle chipset nibble matters before
 * changing it.
 */
static struct nouveau_oclass
nve0_copy_cclass = {
	.handle = NV_ENGCTX(COPY0, 0xc0),
	.ofuncs = &nve0_copy_context_ofuncs,
};
87
88/*******************************************************************************
89 * PCOPY engine/subdev functions
90 ******************************************************************************/
91
/*
 * Construct the COPY0 engine.  Bit 8 of 0x022500 marks the unit as
 * absent/disabled in hw.  NOTE(review): unlike nvc0, no ->intr handler
 * is installed here -- presumably intentional (no local ucode either);
 * confirm interrupts are handled elsewhere.
 */
static int
nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nve0_copy_priv *priv;
	int ret;

	if (nv_rd32(parent, 0x022500) & 0x00000100)
		return -ENODEV;

	ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000040; /* COPY0 intr/enable bit */
	nv_engine(priv)->cclass = &nve0_copy_cclass;
	nv_engine(priv)->sclass = nve0_copy_sclass;
	return 0;
}
113
/*
 * Construct the COPY1 engine.  Bit 9 of 0x022500 marks the unit as
 * absent/disabled in hw.  As with COPY0, no ->intr handler is installed.
 */
static int
nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nve0_copy_priv *priv;
	int ret;

	if (nv_rd32(parent, 0x022500) & 0x00000200)
		return -ENODEV;

	ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000080; /* COPY1 intr/enable bit */
	nv_engine(priv)->cclass = &nve0_copy_cclass;
	nv_engine(priv)->sclass = nve0_copy_sclass;
	return 0;
}
135
/* Engine class: device-table entry point for nve0 COPY0. */
struct nouveau_oclass
nve0_copy0_oclass = {
	.handle = NV_ENGINE(COPY0, 0xe0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_copy0_ctor,
		.dtor = _nouveau_copy_dtor,
		.init = _nouveau_copy_init,
		.fini = _nouveau_copy_fini,
	},
};

/* Engine class: device-table entry point for nve0 COPY1. */
struct nouveau_oclass
nve0_copy1_oclass = {
	.handle = NV_ENGINE(COPY1, 0xe0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_copy1_ctor,
		.dtor = _nouveau_copy_dtor,
		.init = _nouveau_copy_init,
		.fini = _nouveau_copy_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
index 7393813044de..629da02dc352 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
@@ -238,7 +238,7 @@ ih:
238 cmpu b32 $r4 0x60+#dma_count 238 cmpu b32 $r4 0x60+#dma_count
239 bra nc #illegal_mthd 239 bra nc #illegal_mthd
240 shl b32 $r5 $r4 2 240 shl b32 $r5 $r4 2
241 add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff 241 add b32 $r5 ((#ctx_dma - 0x60 * 4) & 0xffff)
242 bset $r3 0x1e 242 bset $r3 0x1e
243 st b32 D[$r5] $r3 243 st b32 D[$r5] $r3
244 add b32 $r4 0x180 - 0x60 244 add b32 $r4 0x180 - 0x60
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
index 38676c74e6e0..09962e4210e9 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
@@ -1,4 +1,4 @@
1uint32_t nv98_pcrypt_data[] = { 1static uint32_t nv98_pcrypt_data[] = {
2/* 0x0000: ctx_dma */ 2/* 0x0000: ctx_dma */
3/* 0x0000: ctx_dma_query */ 3/* 0x0000: ctx_dma_query */
4 0x00000000, 4 0x00000000,
@@ -150,7 +150,7 @@ uint32_t nv98_pcrypt_data[] = {
150 0x00000000, 150 0x00000000,
151}; 151};
152 152
153uint32_t nv98_pcrypt_code[] = { 153static uint32_t nv98_pcrypt_code[] = {
154 0x17f004bd, 154 0x17f004bd,
155 0x0010fe35, 155 0x0010fe35,
156 0xf10004fe, 156 0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
new file mode 100644
index 000000000000..1d85e5b66ca0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -0,0 +1,217 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h>
29#include <core/gpuobj.h>
30
31#include <subdev/fb.h>
32
33#include <engine/fifo.h>
34#include <engine/crypt.h>
35
36struct nv84_crypt_priv {
37 struct nouveau_crypt base;
38};
39
40struct nv84_crypt_chan {
41 struct nouveau_crypt_chan base;
42};
43
44/*******************************************************************************
45 * Crypt object classes
46 ******************************************************************************/
47
48static int
49nv84_crypt_object_ctor(struct nouveau_object *parent,
50 struct nouveau_object *engine,
51 struct nouveau_oclass *oclass, void *data, u32 size,
52 struct nouveau_object **pobject)
53{
54 struct nouveau_gpuobj *obj;
55 int ret;
56
57 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
58 16, 16, 0, &obj);
59 *pobject = nv_object(obj);
60 if (ret)
61 return ret;
62
63 nv_wo32(obj, 0x00, nv_mclass(obj));
64 nv_wo32(obj, 0x04, 0x00000000);
65 nv_wo32(obj, 0x08, 0x00000000);
66 nv_wo32(obj, 0x0c, 0x00000000);
67 return 0;
68}
69
70static struct nouveau_ofuncs
71nv84_crypt_ofuncs = {
72 .ctor = nv84_crypt_object_ctor,
73 .dtor = _nouveau_gpuobj_dtor,
74 .init = _nouveau_gpuobj_init,
75 .fini = _nouveau_gpuobj_fini,
76 .rd32 = _nouveau_gpuobj_rd32,
77 .wr32 = _nouveau_gpuobj_wr32,
78};
79
80static struct nouveau_oclass
81nv84_crypt_sclass[] = {
82 { 0x74c1, &nv84_crypt_ofuncs },
83 {}
84};
85
86/*******************************************************************************
87 * PCRYPT context
88 ******************************************************************************/
89
90static int
91nv84_crypt_context_ctor(struct nouveau_object *parent,
92 struct nouveau_object *engine,
93 struct nouveau_oclass *oclass, void *data, u32 size,
94 struct nouveau_object **pobject)
95{
96 struct nv84_crypt_chan *priv;
97 int ret;
98
99 ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
100 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
101 *pobject = nv_object(priv);
102 if (ret)
103 return ret;
104
105 return 0;
106}
107
108static struct nouveau_oclass
109nv84_crypt_cclass = {
110 .handle = NV_ENGCTX(CRYPT, 0x84),
111 .ofuncs = &(struct nouveau_ofuncs) {
112 .ctor = nv84_crypt_context_ctor,
113 .dtor = _nouveau_crypt_context_dtor,
114 .init = _nouveau_crypt_context_init,
115 .fini = _nouveau_crypt_context_fini,
116 .rd32 = _nouveau_crypt_context_rd32,
117 .wr32 = _nouveau_crypt_context_wr32,
118 },
119};
120
121/*******************************************************************************
122 * PCRYPT engine/subdev functions
123 ******************************************************************************/
124
125static const struct nouveau_bitfield nv84_crypt_intr_mask[] = {
126 { 0x00000001, "INVALID_STATE" },
127 { 0x00000002, "ILLEGAL_MTHD" },
128 { 0x00000004, "ILLEGAL_CLASS" },
129 { 0x00000080, "QUERY" },
130 { 0x00000100, "FAULT" },
131 {}
132};
133
134static void
135nv84_crypt_intr(struct nouveau_subdev *subdev)
136{
137 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
138 struct nouveau_engine *engine = nv_engine(subdev);
139 struct nouveau_object *engctx;
140 struct nv84_crypt_priv *priv = (void *)subdev;
141 u32 stat = nv_rd32(priv, 0x102130);
142 u32 mthd = nv_rd32(priv, 0x102190);
143 u32 data = nv_rd32(priv, 0x102194);
144 u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
145 int chid;
146
147 engctx = nouveau_engctx_get(engine, inst);
148 chid = pfifo->chid(pfifo, engctx);
149
150 if (stat) {
151 nv_error(priv, "");
152 nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
153 printk(" ch %d [0x%010llx] mthd 0x%04x data 0x%08x\n",
154 chid, (u64)inst << 12, mthd, data);
155 }
156
157 nv_wr32(priv, 0x102130, stat);
158 nv_wr32(priv, 0x10200c, 0x10);
159
160 nv50_fb_trap(nouveau_fb(priv), 1);
161 nouveau_engctx_put(engctx);
162}
163
164static int
165nv84_crypt_tlb_flush(struct nouveau_engine *engine)
166{
167 nv50_vm_flush_engine(&engine->base, 0x0a);
168 return 0;
169}
170
171static int
172nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
173 struct nouveau_oclass *oclass, void *data, u32 size,
174 struct nouveau_object **pobject)
175{
176 struct nv84_crypt_priv *priv;
177 int ret;
178
179 ret = nouveau_crypt_create(parent, engine, oclass, &priv);
180 *pobject = nv_object(priv);
181 if (ret)
182 return ret;
183
184 nv_subdev(priv)->unit = 0x00004000;
185 nv_subdev(priv)->intr = nv84_crypt_intr;
186 nv_engine(priv)->cclass = &nv84_crypt_cclass;
187 nv_engine(priv)->sclass = nv84_crypt_sclass;
188 nv_engine(priv)->tlb_flush = nv84_crypt_tlb_flush;
189 return 0;
190}
191
192static int
193nv84_crypt_init(struct nouveau_object *object)
194{
195 struct nv84_crypt_priv *priv = (void *)object;
196 int ret;
197
198 ret = nouveau_crypt_init(&priv->base);
199 if (ret)
200 return ret;
201
202 nv_wr32(priv, 0x102130, 0xffffffff);
203 nv_wr32(priv, 0x102140, 0xffffffbf);
204 nv_wr32(priv, 0x10200c, 0x00000010);
205 return 0;
206}
207
208struct nouveau_oclass
209nv84_crypt_oclass = {
210 .handle = NV_ENGINE(CRYPT, 0x84),
211 .ofuncs = &(struct nouveau_ofuncs) {
212 .ctor = nv84_crypt_ctor,
213 .dtor = _nouveau_crypt_dtor,
214 .init = nv84_crypt_init,
215 .fini = _nouveau_crypt_fini,
216 },
217};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
new file mode 100644
index 000000000000..9e3876c89b96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -0,0 +1,208 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h>
29
30#include <subdev/timer.h>
31#include <subdev/fb.h>
32
33#include <engine/fifo.h>
34#include <engine/crypt.h>
35
36#include "fuc/nv98.fuc.h"
37
38struct nv98_crypt_priv {
39 struct nouveau_crypt base;
40};
41
42struct nv98_crypt_chan {
43 struct nouveau_crypt_chan base;
44};
45
46/*******************************************************************************
47 * Crypt object classes
48 ******************************************************************************/
49
50static struct nouveau_oclass
51nv98_crypt_sclass[] = {
52 { 0x88b4, &nouveau_object_ofuncs },
53 {},
54};
55
56/*******************************************************************************
57 * PCRYPT context
58 ******************************************************************************/
59
60static int
61nv98_crypt_context_ctor(struct nouveau_object *parent,
62 struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 size,
64 struct nouveau_object **pobject)
65{
66 struct nv98_crypt_chan *priv;
67 int ret;
68
69 ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
70 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
71 *pobject = nv_object(priv);
72 if (ret)
73 return ret;
74
75 return 0;
76}
77
78static struct nouveau_oclass
79nv98_crypt_cclass = {
80 .handle = NV_ENGCTX(CRYPT, 0x98),
81 .ofuncs = &(struct nouveau_ofuncs) {
82 .ctor = nv98_crypt_context_ctor,
83 .dtor = _nouveau_crypt_context_dtor,
84 .init = _nouveau_crypt_context_init,
85 .fini = _nouveau_crypt_context_fini,
86 .rd32 = _nouveau_crypt_context_rd32,
87 .wr32 = _nouveau_crypt_context_wr32,
88 },
89};
90
91/*******************************************************************************
92 * PCRYPT engine/subdev functions
93 ******************************************************************************/
94
95static const struct nouveau_enum nv98_crypt_isr_error_name[] = {
96 { 0x0000, "ILLEGAL_MTHD" },
97 { 0x0001, "INVALID_BITFIELD" },
98 { 0x0002, "INVALID_ENUM" },
99 { 0x0003, "QUERY" },
100 {}
101};
102
103static void
104nv98_crypt_intr(struct nouveau_subdev *subdev)
105{
106 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
107 struct nouveau_engine *engine = nv_engine(subdev);
108 struct nouveau_object *engctx;
109 struct nv98_crypt_priv *priv = (void *)subdev;
110 u32 disp = nv_rd32(priv, 0x08701c);
111 u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
112 u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
113 u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
114 u32 addr = nv_rd32(priv, 0x087040) >> 16;
115 u32 mthd = (addr & 0x07ff) << 2;
116 u32 subc = (addr & 0x3800) >> 11;
117 u32 data = nv_rd32(priv, 0x087044);
118 int chid;
119
120 engctx = nouveau_engctx_get(engine, inst);
121 chid = pfifo->chid(pfifo, engctx);
122
123 if (stat & 0x00000040) {
124 nv_error(priv, "DISPATCH_ERROR [");
125 nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
126 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
127 chid, (u64)inst << 12, subc, mthd, data);
128 nv_wr32(priv, 0x087004, 0x00000040);
129 stat &= ~0x00000040;
130 }
131
132 if (stat) {
133 nv_error(priv, "unhandled intr 0x%08x\n", stat);
134 nv_wr32(priv, 0x087004, stat);
135 }
136
137 nv50_fb_trap(nouveau_fb(priv), 1);
138 nouveau_engctx_put(engctx);
139}
140
141static int
142nv98_crypt_tlb_flush(struct nouveau_engine *engine)
143{
144 nv50_vm_flush_engine(&engine->base, 0x0a);
145 return 0;
146}
147
148static int
149nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
150 struct nouveau_oclass *oclass, void *data, u32 size,
151 struct nouveau_object **pobject)
152{
153 struct nv98_crypt_priv *priv;
154 int ret;
155
156 ret = nouveau_crypt_create(parent, engine, oclass, &priv);
157 *pobject = nv_object(priv);
158 if (ret)
159 return ret;
160
161 nv_subdev(priv)->unit = 0x00004000;
162 nv_subdev(priv)->intr = nv98_crypt_intr;
163 nv_engine(priv)->cclass = &nv98_crypt_cclass;
164 nv_engine(priv)->sclass = nv98_crypt_sclass;
165 nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
166 return 0;
167}
168
169static int
170nv98_crypt_init(struct nouveau_object *object)
171{
172 struct nv98_crypt_priv *priv = (void *)object;
173 int ret, i;
174
175 ret = nouveau_crypt_init(&priv->base);
176 if (ret)
177 return ret;
178
179 /* wait for exit interrupt to signal */
180 nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
181 nv_wr32(priv, 0x087004, 0x00000010);
182
183 /* upload microcode code and data segments */
184 nv_wr32(priv, 0x087ff8, 0x00100000);
185 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
186 nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
187
188 nv_wr32(priv, 0x087ff8, 0x00000000);
189 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
190 nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
191
192 /* start it running */
193 nv_wr32(priv, 0x08710c, 0x00000000);
194 nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
195 nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
196 return 0;
197}
198
199struct nouveau_oclass
200nv98_crypt_oclass = {
201 .handle = NV_ENGINE(CRYPT, 0x98),
202 .ofuncs = &(struct nouveau_ofuncs) {
203 .ctor = nv98_crypt_ctor,
204 .dtor = _nouveau_crypt_dtor,
205 .init = nv98_crypt_init,
206 .fini = _nouveau_crypt_fini,
207 },
208};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
new file mode 100644
index 000000000000..1c919f2af89f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -0,0 +1,90 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/disp.h>
26
27struct nv04_disp_priv {
28 struct nouveau_disp base;
29};
30
31static struct nouveau_oclass
32nv04_disp_sclass[] = {
33 {},
34};
35
36static void
37nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
38{
39 struct nouveau_disp *disp = &priv->base;
40 if (disp->vblank.notify)
41 disp->vblank.notify(disp->vblank.data, crtc);
42}
43
44static void
45nv04_disp_intr(struct nouveau_subdev *subdev)
46{
47 struct nv04_disp_priv *priv = (void *)subdev;
48 u32 crtc0 = nv_rd32(priv, 0x600100);
49 u32 crtc1 = nv_rd32(priv, 0x602100);
50
51 if (crtc0 & 0x00000001) {
52 nv04_disp_intr_vblank(priv, 0);
53 nv_wr32(priv, 0x600100, 0x00000001);
54 }
55
56 if (crtc1 & 0x00000001) {
57 nv04_disp_intr_vblank(priv, 1);
58 nv_wr32(priv, 0x602100, 0x00000001);
59 }
60}
61
62static int
63nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
64 struct nouveau_oclass *oclass, void *data, u32 size,
65 struct nouveau_object **pobject)
66{
67 struct nv04_disp_priv *priv;
68 int ret;
69
70 ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
71 "display", &priv);
72 *pobject = nv_object(priv);
73 if (ret)
74 return ret;
75
76 nv_engine(priv)->sclass = nv04_disp_sclass;
77 nv_subdev(priv)->intr = nv04_disp_intr;
78 return 0;
79}
80
81struct nouveau_oclass
82nv04_disp_oclass = {
83 .handle = NV_ENGINE(DISP, 0x04),
84 .ofuncs = &(struct nouveau_ofuncs) {
85 .ctor = nv04_disp_ctor,
86 .dtor = _nouveau_disp_dtor,
87 .init = _nouveau_disp_init,
88 .fini = _nouveau_disp_fini,
89 },
90};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
new file mode 100644
index 000000000000..16a9afb1060b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -0,0 +1,125 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28struct nv50_disp_priv {
29 struct nouveau_disp base;
30};
31
32static struct nouveau_oclass
33nv50_disp_sclass[] = {
34 {},
35};
36
37static void
38nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
39{
40 struct nouveau_disp *disp = &priv->base;
41 struct nouveau_software_chan *chan, *temp;
42 unsigned long flags;
43
44 spin_lock_irqsave(&disp->vblank.lock, flags);
45 list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
46 if (chan->vblank.crtc != crtc)
47 continue;
48
49 nv_wr32(priv, 0x001704, chan->vblank.channel);
50 nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
51
52 if (nv_device(priv)->chipset == 0x50) {
53 nv_wr32(priv, 0x001570, chan->vblank.offset);
54 nv_wr32(priv, 0x001574, chan->vblank.value);
55 } else {
56 if (nv_device(priv)->chipset >= 0xc0) {
57 nv_wr32(priv, 0x06000c,
58 upper_32_bits(chan->vblank.offset));
59 }
60 nv_wr32(priv, 0x060010, chan->vblank.offset);
61 nv_wr32(priv, 0x060014, chan->vblank.value);
62 }
63
64 list_del(&chan->vblank.head);
65 if (disp->vblank.put)
66 disp->vblank.put(disp->vblank.data, crtc);
67 }
68 spin_unlock_irqrestore(&disp->vblank.lock, flags);
69
70 if (disp->vblank.notify)
71 disp->vblank.notify(disp->vblank.data, crtc);
72}
73
74static void
75nv50_disp_intr(struct nouveau_subdev *subdev)
76{
77 struct nv50_disp_priv *priv = (void *)subdev;
78 u32 stat1 = nv_rd32(priv, 0x610024);
79
80 if (stat1 & 0x00000004) {
81 nv50_disp_intr_vblank(priv, 0);
82 nv_wr32(priv, 0x610024, 0x00000004);
83 stat1 &= ~0x00000004;
84 }
85
86 if (stat1 & 0x00000008) {
87 nv50_disp_intr_vblank(priv, 1);
88 nv_wr32(priv, 0x610024, 0x00000008);
89 stat1 &= ~0x00000008;
90 }
91
92}
93
94static int
95nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
96 struct nouveau_oclass *oclass, void *data, u32 size,
97 struct nouveau_object **pobject)
98{
99 struct nv50_disp_priv *priv;
100 int ret;
101
102 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
103 "display", &priv);
104 *pobject = nv_object(priv);
105 if (ret)
106 return ret;
107
108 nv_engine(priv)->sclass = nv50_disp_sclass;
109 nv_subdev(priv)->intr = nv50_disp_intr;
110
111 INIT_LIST_HEAD(&priv->base.vblank.list);
112 spin_lock_init(&priv->base.vblank.lock);
113 return 0;
114}
115
116struct nouveau_oclass
117nv50_disp_oclass = {
118 .handle = NV_ENGINE(DISP, 0x50),
119 .ofuncs = &(struct nouveau_ofuncs) {
120 .ctor = nv50_disp_ctor,
121 .dtor = _nouveau_disp_dtor,
122 .init = _nouveau_disp_init,
123 .fini = _nouveau_disp_fini,
124 },
125};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
new file mode 100644
index 000000000000..d93efbcf75b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -0,0 +1,118 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bar.h>
26
27#include <engine/software.h>
28#include <engine/disp.h>
29
30struct nvd0_disp_priv {
31 struct nouveau_disp base;
32};
33
34static struct nouveau_oclass
35nvd0_disp_sclass[] = {
36 {},
37};
38
39static void
40nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
41{
42 struct nouveau_bar *bar = nouveau_bar(priv);
43 struct nouveau_disp *disp = &priv->base;
44 struct nouveau_software_chan *chan, *temp;
45 unsigned long flags;
46
47 spin_lock_irqsave(&disp->vblank.lock, flags);
48 list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
49 if (chan->vblank.crtc != crtc)
50 continue;
51
52 nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
53 bar->flush(bar);
54 nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
55 nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
56 nv_wr32(priv, 0x060014, chan->vblank.value);
57
58 list_del(&chan->vblank.head);
59 if (disp->vblank.put)
60 disp->vblank.put(disp->vblank.data, crtc);
61 }
62 spin_unlock_irqrestore(&disp->vblank.lock, flags);
63
64 if (disp->vblank.notify)
65 disp->vblank.notify(disp->vblank.data, crtc);
66}
67
68static void
69nvd0_disp_intr(struct nouveau_subdev *subdev)
70{
71 struct nvd0_disp_priv *priv = (void *)subdev;
72 u32 intr = nv_rd32(priv, 0x610088);
73 int i;
74
75 for (i = 0; i < 4; i++) {
76 u32 mask = 0x01000000 << i;
77 if (mask & intr) {
78 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
79 if (stat & 0x00000001)
80 nvd0_disp_intr_vblank(priv, i);
81 nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
82 nv_rd32(priv, 0x6100c0 + (i * 0x800));
83 }
84 }
85}
86
87static int
88nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
89 struct nouveau_oclass *oclass, void *data, u32 size,
90 struct nouveau_object **pobject)
91{
92 struct nvd0_disp_priv *priv;
93 int ret;
94
95 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
96 "display", &priv);
97 *pobject = nv_object(priv);
98 if (ret)
99 return ret;
100
101 nv_engine(priv)->sclass = nvd0_disp_sclass;
102 nv_subdev(priv)->intr = nvd0_disp_intr;
103
104 INIT_LIST_HEAD(&priv->base.vblank.list);
105 spin_lock_init(&priv->base.vblank.lock);
106 return 0;
107}
108
109struct nouveau_oclass
110nvd0_disp_oclass = {
111 .handle = NV_ENGINE(DISP, 0xd0),
112 .ofuncs = &(struct nouveau_ofuncs) {
113 .ctor = nvd0_disp_ctor,
114 .dtor = _nouveau_disp_dtor,
115 .init = _nouveau_disp_init,
116 .fini = _nouveau_disp_fini,
117 },
118};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
new file mode 100644
index 000000000000..5a1c68474597
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/subdev.h>
26#include <core/device.h>
27#include <subdev/vga.h>
28
29u8
30nv_rdport(void *obj, int head, u16 port)
31{
32 struct nouveau_device *device = nv_device(obj);
33
34 if (device->card_type >= NV_50)
35 return nv_rd08(obj, 0x601000 + port);
36
37 if (port == 0x03c0 || port == 0x03c1 || /* AR */
38 port == 0x03c2 || port == 0x03da || /* INP0 */
39 port == 0x03d4 || port == 0x03d5) /* CR */
40 return nv_rd08(obj, 0x601000 + (head * 0x2000) + port);
41
42 if (port == 0x03c2 || port == 0x03cc || /* MISC */
43 port == 0x03c4 || port == 0x03c5 || /* SR */
44 port == 0x03ce || port == 0x03cf) { /* GR */
45 if (device->card_type < NV_40)
46 head = 0; /* CR44 selects head */
47 return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port);
48 }
49
50 nv_error(obj, "unknown vga port 0x%04x\n", port);
51 return 0x00;
52}
53
54void
55nv_wrport(void *obj, int head, u16 port, u8 data)
56{
57 struct nouveau_device *device = nv_device(obj);
58
59 if (device->card_type >= NV_50)
60 nv_wr08(obj, 0x601000 + port, data);
61 else
62 if (port == 0x03c0 || port == 0x03c1 || /* AR */
63 port == 0x03c2 || port == 0x03da || /* INP0 */
64 port == 0x03d4 || port == 0x03d5) /* CR */
65 nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data);
66 else
67 if (port == 0x03c2 || port == 0x03cc || /* MISC */
68 port == 0x03c4 || port == 0x03c5 || /* SR */
69 port == 0x03ce || port == 0x03cf) { /* GR */
70 if (device->card_type < NV_40)
71 head = 0; /* CR44 selects head */
72 nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data);
73 } else
74 nv_error(obj, "unknown vga port 0x%04x\n", port);
75}
76
77u8
78nv_rdvgas(void *obj, int head, u8 index)
79{
80 nv_wrport(obj, head, 0x03c4, index);
81 return nv_rdport(obj, head, 0x03c5);
82}
83
84void
85nv_wrvgas(void *obj, int head, u8 index, u8 value)
86{
87 nv_wrport(obj, head, 0x03c4, index);
88 nv_wrport(obj, head, 0x03c5, value);
89}
90
91u8
92nv_rdvgag(void *obj, int head, u8 index)
93{
94 nv_wrport(obj, head, 0x03ce, index);
95 return nv_rdport(obj, head, 0x03cf);
96}
97
98void
99nv_wrvgag(void *obj, int head, u8 index, u8 value)
100{
101 nv_wrport(obj, head, 0x03ce, index);
102 nv_wrport(obj, head, 0x03cf, value);
103}
104
105u8
106nv_rdvgac(void *obj, int head, u8 index)
107{
108 nv_wrport(obj, head, 0x03d4, index);
109 return nv_rdport(obj, head, 0x03d5);
110}
111
112void
113nv_wrvgac(void *obj, int head, u8 index, u8 value)
114{
115 nv_wrport(obj, head, 0x03d4, index);
116 nv_wrport(obj, head, 0x03d5, value);
117}
118
119u8
120nv_rdvgai(void *obj, int head, u16 port, u8 index)
121{
122 if (port == 0x03c4) return nv_rdvgas(obj, head, index);
123 if (port == 0x03ce) return nv_rdvgag(obj, head, index);
124 if (port == 0x03d4) return nv_rdvgac(obj, head, index);
125 nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
126 return 0x00;
127}
128
129void
130nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
131{
132 if (port == 0x03c4) nv_wrvgas(obj, head, index, value);
133 else if (port == 0x03ce) nv_wrvgag(obj, head, index, value);
134 else if (port == 0x03d4) nv_wrvgac(obj, head, index, value);
135 else nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
136}
137
138bool
139nv_lockvgac(void *obj, bool lock)
140{
141 bool locked = !nv_rdvgac(obj, 0, 0x1f);
142 u8 data = lock ? 0x99 : 0x57;
143 nv_wrvgac(obj, 0, 0x1f, data);
144 if (nv_device(obj)->chipset == 0x11) {
145 if (!(nv_rd32(obj, 0x001084) & 0x10000000))
146 nv_wrvgac(obj, 1, 0x1f, data);
147 }
148 return locked;
149}
150
151/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
152 * it affects only the 8 bit vga io regs, which we access using mmio at
153 * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
154 * in general, the set value of cr44 does not matter: reg access works as
155 * expected and values can be set for the appropriate head by using a 0x2000
156 * offset as required
157 * however:
158 * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
159 * cr44 must be set to 0 or 3 for accessing values on the correct head
160 * through the common 0xc03c* addresses
161 * b) in tied mode (4) head B is programmed to the values set on head A, and
162 * access using the head B addresses can have strange results, ergo we leave
163 * tied mode in init once we know to what cr44 should be restored on exit
164 *
165 * the owner parameter is slightly abused:
166 * 0 and 1 are treated as head values and so the set value is (owner * 3)
167 * other values are treated as literal values to set
168 */
169u8
170nv_rdvgaowner(void *obj)
171{
172 if (nv_device(obj)->card_type < NV_50) {
173 if (nv_device(obj)->chipset == 0x11) {
174 u32 tied = nv_rd32(obj, 0x001084) & 0x10000000;
175 if (tied == 0) {
176 u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80;
177 u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01;
178 u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80;
179 u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01;
180 if (slA && !tvA) return 0x00;
181 if (slB && !tvB) return 0x03;
182 if (slA) return 0x00;
183 if (slB) return 0x03;
184 return 0x00;
185 }
186 return 0x04;
187 }
188
189 return nv_rdvgac(obj, 0, 0x44);
190 }
191
192 nv_error(obj, "rdvgaowner after nv4x\n");
193 return 0x00;
194}
195
196void
197nv_wrvgaowner(void *obj, u8 select)
198{
199 if (nv_device(obj)->card_type < NV_50) {
200 u8 owner = (select == 1) ? 3 : select;
201 if (nv_device(obj)->chipset == 0x11) {
202 /* workaround hw lockup bug */
203 nv_rdvgac(obj, 0, 0x1f);
204 nv_rdvgac(obj, 1, 0x1f);
205 }
206
207 nv_wrvgac(obj, 0, 0x44, owner);
208
209 if (nv_device(obj)->chipset == 0x11) {
210 nv_wrvgac(obj, 0, 0x2e, owner);
211 nv_wrvgac(obj, 0, 0x2e, owner);
212 }
213 } else
214 nv_error(obj, "wrvgaowner after nv4x\n");
215}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
new file mode 100644
index 000000000000..e1f013d39768
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/class.h>
27
28#include <subdev/fb.h>
29#include <engine/dmaobj.h>
30
31int
32nouveau_dmaobj_create_(struct nouveau_object *parent,
33 struct nouveau_object *engine,
34 struct nouveau_oclass *oclass,
35 void *data, u32 size, int len, void **pobject)
36{
37 struct nv_dma_class *args = data;
38 struct nouveau_dmaobj *object;
39 int ret;
40
41 if (size < sizeof(*args))
42 return -EINVAL;
43
44 ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
45 object = *pobject;
46 if (ret)
47 return ret;
48
49 switch (args->flags & NV_DMA_TARGET_MASK) {
50 case NV_DMA_TARGET_VM:
51 object->target = NV_MEM_TARGET_VM;
52 break;
53 case NV_DMA_TARGET_VRAM:
54 object->target = NV_MEM_TARGET_VRAM;
55 break;
56 case NV_DMA_TARGET_PCI:
57 object->target = NV_MEM_TARGET_PCI;
58 break;
59 case NV_DMA_TARGET_PCI_US:
60 case NV_DMA_TARGET_AGP:
61 object->target = NV_MEM_TARGET_PCI_NOSNOOP;
62 break;
63 default:
64 return -EINVAL;
65 }
66
67 switch (args->flags & NV_DMA_ACCESS_MASK) {
68 case NV_DMA_ACCESS_VM:
69 object->access = NV_MEM_ACCESS_VM;
70 break;
71 case NV_DMA_ACCESS_RD:
72 object->access = NV_MEM_ACCESS_RO;
73 break;
74 case NV_DMA_ACCESS_WR:
75 object->access = NV_MEM_ACCESS_WO;
76 break;
77 case NV_DMA_ACCESS_RDWR:
78 object->access = NV_MEM_ACCESS_RW;
79 break;
80 default:
81 return -EINVAL;
82 }
83
84 object->start = args->start;
85 object->limit = args->limit;
86 return 0;
87}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
new file mode 100644
index 000000000000..9f4cc2f31994
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -0,0 +1,185 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26#include <core/class.h>
27
28#include <subdev/fb.h>
29#include <subdev/vm/nv04.h>
30
31#include <engine/dmaobj.h>
32
/* nv04 dmaobj engine instance; no state beyond the common base */
struct nv04_dmaeng_priv {
	struct nouveau_dmaeng base;
};
36
/* nv04 dma object instance; no state beyond the common base */
struct nv04_dmaobj_priv {
	struct nouveau_dmaobj base;
};
40
/* Translate a software dma object into its nv04 hardware representation:
 * a 16-byte context-dma gpuobj encoding target, access, base and length.
 *
 * NOTE(review): the flags0/flags2 magic values are raw hw bitfields for the
 * 0x0002/0x0003/0x003d context-dma layout — confirm against register docs.
 */
static int
nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
		 struct nouveau_object *parent,
		 struct nouveau_dmaobj *dmaobj,
		 struct nouveau_gpuobj **pgpuobj)
{
	struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng);
	struct nouveau_gpuobj *gpuobj;
	u32 flags0 = nv_mclass(dmaobj);
	u32 flags2 = 0x00000000;
	u64 offset = dmaobj->start & 0xfffff000;	/* page-aligned base */
	u64 adjust = dmaobj->start & 0x00000fff;	/* offset within first page */
	u32 length = dmaobj->limit - dmaobj->start;
	int ret;

	if (dmaobj->target == NV_MEM_TARGET_VM) {
		if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
			/* nv04 vm is a single pagetable: a whole-vm object can
			 * simply share (dup) the pagetable gpuobj; otherwise
			 * translate start through the pagetable to the backing
			 * page and target that directly
			 */
			struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
			if (!dmaobj->start)
				return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
			offset = nv_ro32(pgt, 8 + (offset >> 10));
			offset &= 0xfffff000;
		}

		/* vm-backed objects become plain PCI dma from here on */
		dmaobj->target = NV_MEM_TARGET_PCI;
		dmaobj->access = NV_MEM_ACCESS_RW;
	}

	switch (dmaobj->target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00003000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00023000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00033000;
		break;
	default:
		return -EINVAL;
	}

	switch (dmaobj->access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
		/* fallthrough - write access also sets the flags2 bit */
	case NV_MEM_ACCESS_RW:
		flags2 |= 0x00000002;
		break;
	default:
		return -EINVAL;
	}

	ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
	*pgpuobj = gpuobj;
	if (ret == 0) {
		nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
		nv_wo32(*pgpuobj, 0x04, length);
		nv_wo32(*pgpuobj, 0x08, flags2 | offset);
		nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
	}

	return ret;
}
107
108static int
109nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
110 struct nouveau_oclass *oclass, void *data, u32 size,
111 struct nouveau_object **pobject)
112{
113 struct nouveau_dmaeng *dmaeng = (void *)engine;
114 struct nv04_dmaobj_priv *dmaobj;
115 struct nouveau_gpuobj *gpuobj;
116 int ret;
117
118 ret = nouveau_dmaobj_create(parent, engine, oclass,
119 data, size, &dmaobj);
120 *pobject = nv_object(dmaobj);
121 if (ret)
122 return ret;
123
124 switch (nv_mclass(parent)) {
125 case NV_DEVICE_CLASS:
126 break;
127 case NV03_CHANNEL_DMA_CLASS:
128 case NV10_CHANNEL_DMA_CLASS:
129 case NV17_CHANNEL_DMA_CLASS:
130 case NV40_CHANNEL_DMA_CLASS:
131 ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
132 nouveau_object_ref(NULL, pobject);
133 *pobject = nv_object(gpuobj);
134 break;
135 default:
136 return -EINVAL;
137 }
138
139 return ret;
140}
141
/* nv04 dma object vtable: only the ctor is generation-specific */
static struct nouveau_ofuncs
nv04_dmaobj_ofuncs = {
	.ctor = nv04_dmaobj_ctor,
	.dtor = _nouveau_dmaobj_dtor,
	.init = _nouveau_dmaobj_init,
	.fini = _nouveau_dmaobj_fini,
};
149
/* supported dma classes: 0x0002/0x0003/0x003d (from/to/in-memory ctxdma) */
static struct nouveau_oclass
nv04_dmaobj_sclass[] = {
	{ 0x0002, &nv04_dmaobj_ofuncs },
	{ 0x0003, &nv04_dmaobj_ofuncs },
	{ 0x003d, &nv04_dmaobj_ofuncs },
	{}
};
157
158static int
159nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
160 struct nouveau_oclass *oclass, void *data, u32 size,
161 struct nouveau_object **pobject)
162{
163 struct nv04_dmaeng_priv *priv;
164 int ret;
165
166 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
167 *pobject = nv_object(priv);
168 if (ret)
169 return ret;
170
171 priv->base.base.sclass = nv04_dmaobj_sclass;
172 priv->base.bind = nv04_dmaobj_bind;
173 return 0;
174}
175
/* nv04 dmaobj engine class description, registered with the device */
struct nouveau_oclass
nv04_dmaeng_oclass = {
	.handle = NV_ENGINE(DMAOBJ, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_dmaeng_ctor,
		.dtor = _nouveau_dmaeng_dtor,
		.init = _nouveau_dmaeng_init,
		.fini = _nouveau_dmaeng_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
new file mode 100644
index 000000000000..045d2565e289
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -0,0 +1,173 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26#include <core/class.h>
27
28#include <subdev/fb.h>
29#include <engine/dmaobj.h>
30
/* nv50 dmaobj engine instance; no state beyond the common base */
struct nv50_dmaeng_priv {
	struct nouveau_dmaeng base;
};
34
/* nv50 dma object instance; no state beyond the common base */
struct nv50_dmaobj_priv {
	struct nouveau_dmaobj base;
};
38
/* Translate a software dma object into its nv50 hardware representation:
 * a 24-byte context-dma gpuobj (class | target/access flags, 40-bit limit
 * and base split across three words, plus two zero words).
 *
 * NOTE(review): unlike nv04, the access switch has no default case — any
 * other NV_MEM_ACCESS_* value silently gets VM-style access bits; confirm
 * this is intentional.
 */
static int
nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
		 struct nouveau_object *parent,
		 struct nouveau_dmaobj *dmaobj,
		 struct nouveau_gpuobj **pgpuobj)
{
	u32 flags = nv_mclass(dmaobj);
	int ret;

	switch (dmaobj->target) {
	case NV_MEM_TARGET_VM:
		flags |= 0x00000000;
		flags |= 0x60000000; /* COMPRESSION_USEVM */
		flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
		break;
	case NV_MEM_TARGET_VRAM:
		flags |= 0x00010000;
		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
		break;
	case NV_MEM_TARGET_PCI:
		flags |= 0x00020000;
		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags |= 0x00030000;
		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
		break;
	default:
		return -EINVAL;
	}

	switch (dmaobj->access) {
	case NV_MEM_ACCESS_VM:
		break;
	case NV_MEM_ACCESS_RO:
		flags |= 0x00040000;
		break;
	case NV_MEM_ACCESS_WO:
	case NV_MEM_ACCESS_RW:
		flags |= 0x00080000;
		break;
	}

	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
	if (ret == 0) {
		nv_wo32(*pgpuobj, 0x00, flags);
		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
					upper_32_bits(dmaobj->start));
		nv_wo32(*pgpuobj, 0x10, 0x00000000);
		nv_wo32(*pgpuobj, 0x14, 0x00000000);
	}

	return ret;
}
95
96static int
97nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
98 struct nouveau_oclass *oclass, void *data, u32 size,
99 struct nouveau_object **pobject)
100{
101 struct nouveau_dmaeng *dmaeng = (void *)engine;
102 struct nv50_dmaobj_priv *dmaobj;
103 struct nouveau_gpuobj *gpuobj;
104 int ret;
105
106 ret = nouveau_dmaobj_create(parent, engine, oclass,
107 data, size, &dmaobj);
108 *pobject = nv_object(dmaobj);
109 if (ret)
110 return ret;
111
112 switch (nv_mclass(parent)) {
113 case NV_DEVICE_CLASS:
114 break;
115 case NV50_CHANNEL_DMA_CLASS:
116 case NV84_CHANNEL_DMA_CLASS:
117 case NV50_CHANNEL_IND_CLASS:
118 case NV84_CHANNEL_IND_CLASS:
119 ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
120 nouveau_object_ref(NULL, pobject);
121 *pobject = nv_object(gpuobj);
122 break;
123 default:
124 return -EINVAL;
125 }
126
127 return ret;
128}
129
/* nv50 dma object vtable: only the ctor is generation-specific */
static struct nouveau_ofuncs
nv50_dmaobj_ofuncs = {
	.ctor = nv50_dmaobj_ctor,
	.dtor = _nouveau_dmaobj_dtor,
	.init = _nouveau_dmaobj_init,
	.fini = _nouveau_dmaobj_fini,
};
137
/* supported dma classes: 0x0002/0x0003/0x003d (from/to/in-memory ctxdma) */
static struct nouveau_oclass
nv50_dmaobj_sclass[] = {
	{ 0x0002, &nv50_dmaobj_ofuncs },
	{ 0x0003, &nv50_dmaobj_ofuncs },
	{ 0x003d, &nv50_dmaobj_ofuncs },
	{}
};
145
146static int
147nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
148 struct nouveau_oclass *oclass, void *data, u32 size,
149 struct nouveau_object **pobject)
150{
151 struct nv50_dmaeng_priv *priv;
152 int ret;
153
154 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
155 *pobject = nv_object(priv);
156 if (ret)
157 return ret;
158
159 priv->base.base.sclass = nv50_dmaobj_sclass;
160 priv->base.bind = nv50_dmaobj_bind;
161 return 0;
162}
163
/* nv50 dmaobj engine class description, registered with the device */
struct nouveau_oclass
nv50_dmaeng_oclass = {
	.handle = NV_ENGINE(DMAOBJ, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_dmaeng_ctor,
		.dtor = _nouveau_dmaeng_dtor,
		.init = _nouveau_dmaeng_init,
		.fini = _nouveau_dmaeng_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
new file mode 100644
index 000000000000..5baa08695535
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include <subdev/fb.h>
28#include <engine/dmaobj.h>
29
/* nvc0 dmaobj engine instance; no state beyond the common base */
struct nvc0_dmaeng_priv {
	struct nouveau_dmaeng base;
};
33
/* nvc0 dma object instance; no state beyond the common base */
struct nvc0_dmaobj_priv {
	struct nouveau_dmaobj base;
};
37
38static int
39nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
40 struct nouveau_oclass *oclass, void *data, u32 size,
41 struct nouveau_object **pobject)
42{
43 struct nvc0_dmaobj_priv *dmaobj;
44 int ret;
45
46 ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
47 *pobject = nv_object(dmaobj);
48 if (ret)
49 return ret;
50
51 if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
52 return -EINVAL;
53
54 return 0;
55}
56
/* nvc0 dma object vtable: only the ctor is generation-specific */
static struct nouveau_ofuncs
nvc0_dmaobj_ofuncs = {
	.ctor = nvc0_dmaobj_ctor,
	.dtor = _nouveau_dmaobj_dtor,
	.init = _nouveau_dmaobj_init,
	.fini = _nouveau_dmaobj_fini,
};
64
/* supported dma classes: 0x0002/0x0003/0x003d (from/to/in-memory ctxdma) */
static struct nouveau_oclass
nvc0_dmaobj_sclass[] = {
	{ 0x0002, &nvc0_dmaobj_ofuncs },
	{ 0x0003, &nvc0_dmaobj_ofuncs },
	{ 0x003d, &nvc0_dmaobj_ofuncs },
	{}
};
72
73static int
74nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
75 struct nouveau_oclass *oclass, void *data, u32 size,
76 struct nouveau_object **pobject)
77{
78 struct nvc0_dmaeng_priv *priv;
79 int ret;
80
81 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
82 *pobject = nv_object(priv);
83 if (ret)
84 return ret;
85
86 priv->base.base.sclass = nvc0_dmaobj_sclass;
87 return 0;
88}
89
/* nvc0 dmaobj engine class description, registered with the device */
struct nouveau_oclass
nvc0_dmaeng_oclass = {
	.handle = NV_ENGINE(DMAOBJ, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_dmaeng_ctor,
		.dtor = _nouveau_dmaeng_dtor,
		.init = _nouveau_dmaeng_init,
		.fini = _nouveau_dmaeng_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
new file mode 100644
index 000000000000..bbb43c67c2ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/handle.h>
27
28#include <engine/dmaobj.h>
29#include <engine/fifo.h>
30
/* Common fifo channel constructor.
 *
 * Creates the namedb-based channel object, resolves and validates the push
 * buffer dma object (handle 'pushbuf' looked up on the parent), binds it to
 * its hw representation where the dmaobj engine provides bind(), claims a
 * free channel id between priv->min and priv->max, and maps the channel's
 * control window from PCI BAR 'bar' at 'addr' + chid * 'size'.
 *
 * NOTE(review): on error after the namedb object exists, cleanup of the
 * handle reference and channel slot is left to the channel dtor path.
 */
int
nouveau_fifo_channel_create_(struct nouveau_object *parent,
			     struct nouveau_object *engine,
			     struct nouveau_oclass *oclass,
			     int bar, u32 addr, u32 size, u32 pushbuf,
			     u32 engmask, int len, void **ptr)
{
	struct nouveau_device *device = nv_device(engine);
	struct nouveau_fifo *priv = (void *)engine;
	struct nouveau_fifo_chan *chan;
	struct nouveau_dmaeng *dmaeng;
	unsigned long flags;
	int ret;

	/* create base object class */
	ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
				     engmask, len, ptr);
	chan = *ptr;
	if (ret)
		return ret;

	/* validate dma object representing push buffer */
	chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
	if (!chan->pushdma)
		return -ENOENT;

	/* only context-dma classes are acceptable push buffers */
	dmaeng = (void *)chan->pushdma->base.engine;
	switch (chan->pushdma->base.oclass->handle) {
	case 0x0002:
	case 0x003d:
		break;
	default:
		return -EINVAL;
	}

	/* translate to the hw representation when the engine requires it */
	if (dmaeng->bind) {
		ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
		if (ret)
			return ret;
	}

	/* find a free fifo channel */
	spin_lock_irqsave(&priv->lock, flags);
	for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
		if (!priv->channel[chan->chid]) {
			priv->channel[chan->chid] = nv_object(chan);
			break;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (chan->chid == priv->max) {
		nv_error(priv, "no free channels\n");
		return -ENOSPC;
	}

	/* map fifo control registers */
	chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr +
			     (chan->chid * size), size);
	if (!chan->user)
		return -EFAULT; /* NOTE(review): -ENOMEM is more conventional here */

	chan->size = size;
	return 0;
}
96
97void
98nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
99{
100 struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
101 unsigned long flags;
102
103 iounmap(chan->user);
104
105 spin_lock_irqsave(&priv->lock, flags);
106 priv->channel[chan->chid] = NULL;
107 spin_unlock_irqrestore(&priv->lock, flags);
108
109 nouveau_gpuobj_ref(NULL, &chan->pushgpu);
110 nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
111 nouveau_namedb_destroy(&chan->base);
112}
113
/* generic object dtor hook: forward to the fifo channel destructor */
void
_nouveau_fifo_channel_dtor(struct nouveau_object *object)
{
	nouveau_fifo_channel_destroy((struct nouveau_fifo_chan *)object);
}
120
121u32
122_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
123{
124 struct nouveau_fifo_chan *chan = (void *)object;
125 return ioread32_native(chan->user + addr);
126}
127
128void
129_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
130{
131 struct nouveau_fifo_chan *chan = (void *)object;
132 iowrite32_native(data, chan->user + addr);
133}
134
135static int
136nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
137{
138 int engidx = nv_hclass(priv) & 0xff;
139
140 while (object && object->parent) {
141 if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
142 (nv_hclass(object->parent) & 0xff) == engidx)
143 return nouveau_fifo_chan(object)->chid;
144 object = object->parent;
145 }
146
147 return -1;
148}
149
150void
151nouveau_fifo_destroy(struct nouveau_fifo *priv)
152{
153 kfree(priv->channel);
154 nouveau_engine_destroy(&priv->base);
155}
156
157int
158nouveau_fifo_create_(struct nouveau_object *parent,
159 struct nouveau_object *engine,
160 struct nouveau_oclass *oclass,
161 int min, int max, int length, void **pobject)
162{
163 struct nouveau_fifo *priv;
164 int ret;
165
166 ret = nouveau_engine_create_(parent, engine, oclass, true, "PFIFO",
167 "fifo", length, pobject);
168 priv = *pobject;
169 if (ret)
170 return ret;
171
172 priv->min = min;
173 priv->max = max;
174 priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
175 if (!priv->channel)
176 return -ENOMEM;
177
178 priv->chid = nouveau_fifo_chid;
179 spin_lock_init(&priv->lock);
180 return 0;
181}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
new file mode 100644
index 000000000000..ea76e3e8c9c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -0,0 +1,630 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/namedb.h>
29#include <core/handle.h>
30#include <core/ramht.h>
31
32#include <subdev/instmem.h>
33#include <subdev/instmem/nv04.h>
34#include <subdev/timer.h>
35#include <subdev/fb.h>
36
37#include <engine/fifo.h>
38
39#include "nv04.h"
40
/* nv04 RAMFC layout: maps each per-channel fifo state register to its
 * bitfield within the channel's 32-byte RAMFC context entry
 * (field width, register shift, ctx offset, ctx shift, register). */
static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};
53
54/*******************************************************************************
55 * FIFO channel objects
56 ******************************************************************************/
57
/* Attach an object to a channel by inserting a RAMHT entry mapping its
 * handle to a context word (instance address or marker, engine id, channel
 * id, valid bit).  Returns the RAMHT cookie used later for detach, or a
 * negative error code.
 */
int
nv04_fifo_object_attach(struct nouveau_object *parent,
			struct nouveau_object *object, u32 handle)
{
	struct nv04_fifo_priv *priv = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	/* engine id field of the context word */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW:
		context |= 0x00000000;
		break;
	case NVDEV_ENGINE_GR:
		context |= 0x00010000;
		break;
	case NVDEV_ENGINE_MPEG:
		context |= 0x00020000;
		break;
	default:
		return -EINVAL;
	}

	context |= 0x80000000; /* valid */
	context |= chid << 24;

	/* RAMHT is shared across channels; serialize updates */
	mutex_lock(&nv_subdev(priv)->mutex);
	ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
	mutex_unlock(&nv_subdev(priv)->mutex);
	return ret;
}
95
96void
97nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
98{
99 struct nv04_fifo_priv *priv = (void *)parent->engine;
100 mutex_lock(&nv_subdev(priv)->mutex);
101 nouveau_ramht_remove(priv->ramht, cookie);
102 mutex_unlock(&nv_subdev(priv)->mutex);
103}
104
/* Record the owning channel id as the engine context "address".
 * NOTE(review): on nv04-class hw the chid alone appears to identify the
 * context — no per-channel context object address exists; confirm. */
int
nv04_fifo_context_attach(struct nouveau_object *parent,
			 struct nouveau_object *object)
{
	nv_engctx(object)->addr = nouveau_fifo_chan(parent)->chid;
	return 0;
}
112
/* Construct an nv04 DMA fifo channel: create the common channel (control
 * window in BAR0 at 0x800000 + chid * 0x10000), install the object/context
 * attach hooks and seed the channel's RAMFC entry with the initial push
 * buffer put/get pointers, instance and fetch parameters.
 */
static int
nv04_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	struct nv03_channel_dma_class *args = data;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
					  0x10000, args->pushbuf,
					  (1 << NVDEV_ENGINE_DMAOBJ) |
					  (1 << NVDEV_ENGINE_SW) |
					  (1 << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;	/* 32 bytes of RAMFC per channel */

	/* DMA_PUT/DMA_GET start at the requested offset */
	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x10,
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}
153
154void
155nv04_fifo_chan_dtor(struct nouveau_object *object)
156{
157 struct nv04_fifo_priv *priv = (void *)object->engine;
158 struct nv04_fifo_chan *chan = (void *)object;
159 struct ramfc_desc *c = priv->ramfc_desc;
160
161 do {
162 nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
163 } while ((++c)->bits);
164
165 nouveau_fifo_channel_destroy(&chan->base);
166}
167
168int
169nv04_fifo_chan_init(struct nouveau_object *object)
170{
171 struct nv04_fifo_priv *priv = (void *)object->engine;
172 struct nv04_fifo_chan *chan = (void *)object;
173 u32 mask = 1 << chan->base.chid;
174 unsigned long flags;
175 int ret;
176
177 ret = nouveau_fifo_channel_init(&chan->base);
178 if (ret)
179 return ret;
180
181 spin_lock_irqsave(&priv->base.lock, flags);
182 nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
183 spin_unlock_irqrestore(&priv->base.lock, flags);
184 return 0;
185}
186
/* Tear down a channel's active hw state: with context switches halted, if
 * this channel currently owns CACHE1, save the live register state back
 * into its RAMFC entry and load a null context, then disable its DMA mode
 * bit and resume normal fifo operation.
 */
int
nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
	struct nv04_fifo_priv *priv = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nouveau_gpuobj *fctx = priv->ramfc;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&priv->base.lock, flags);
	nv_wr32(priv, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
	if (chid == chan->base.chid) {
		nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
		nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		/* save each live register field into the RAMFC entry */
		c = priv->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		/* clear the live registers */
		c = priv->ramfc_desc;
		do {
			nv_wr32(priv, c->regp, 0x00000000);
		} while ((++c)->bits);

		/* point CACHE1 at the (invalid) max channel id */
		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&priv->base.lock, flags);

	return nouveau_fifo_channel_fini(&chan->base, suspend);
}
237
/* nv04 fifo channel vtable; rd32/wr32 access the mapped control window */
static struct nouveau_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};
247
/* channel classes creatable on nv04 fifo: nv03-style DMA channels only */
static struct nouveau_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs },
	{}
};
253
254/*******************************************************************************
255 * FIFO context - basically just the instmem reserved for the channel
256 ******************************************************************************/
257
258int
259nv04_fifo_context_ctor(struct nouveau_object *parent,
260 struct nouveau_object *engine,
261 struct nouveau_oclass *oclass, void *data, u32 size,
262 struct nouveau_object **pobject)
263{
264 struct nv04_fifo_base *base;
265 int ret;
266
267 ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
268 0x1000, NVOBJ_FLAG_HEAP, &base);
269 *pobject = nv_object(base);
270 if (ret)
271 return ret;
272
273 return 0;
274}
275
/* nv04 fifo engine-context class (the channel's instmem reservation) */
static struct nouveau_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nouveau_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
288
289/*******************************************************************************
290 * PFIFO engine
291 ******************************************************************************/
292
/* Halt fifo processing (caches and puller); the fifo lock is taken here
 * and held until the matching nv04_fifo_start() call, with the saved irq
 * flags handed back through *pflags.
 */
void
nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
__acquires(priv->base.lock)
{
	struct nv04_fifo_priv *priv = (void *)pfifo;
	unsigned long flags;

	spin_lock_irqsave(&priv->base.lock, flags);
	*pflags = flags;

	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
			   NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
		nv_warn(priv, "timeout idling puller\n");

	if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
325
/* Resume CACHE1 processing and release the lock taken by
 * nv04_fifo_pause(); *pflags must be the IRQ flags it saved.
 */
void
nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags)
__releases(priv->base.lock)
{
	struct nv04_fifo_priv *priv = (void *)pfifo;
	unsigned long flags = *pflags;

	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&priv->base.lock, flags);
}
338
339static const char *
340nv_dma_state_err(u32 state)
341{
342 static const char * const desc[] = {
343 "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
344 "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
345 };
346 return desc[(state >> 29) & 0x7];
347}
348
/* Try to service a method that faulted in CACHE1 as a software method.
 * Returns true if the method was consumed here (an object bind that
 * placed a SW-engine object on a subchannel, or a method forwarded to
 * the SW object already bound there); false means the caller should
 * report it as a genuine CACHE_ERROR.
 */
static bool
nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
{
	struct nv04_fifo_chan *chan = NULL;
	struct nouveau_handle *bind;
	const int subc = (addr >> 13) & 0x7;	/* subchannel from address */
	const int mthd = addr & 0x1ffc;		/* method offset */
	bool handled = false;
	unsigned long flags;
	u32 engine;

	spin_lock_irqsave(&priv->base.lock, flags);
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		/* object bind: remember the handle if it's a SW-engine
		 * object, and clear the subchannel's engine bits so the
		 * puller won't route it to hardware */
		bind = nouveau_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nouveau_namedb_put(bind);
		break;
	default:
		/* only forward when the subchannel's engine field is
		 * zero (i.e. it was bound to software above) */
		engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nouveau_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return handled;
}
400
/* PFIFO interrupt handler, shared by the nv04-family fifo engines (the
 * nv10/nv17 ctors below install it too, and it contains NV40/NV50
 * special cases).  Handles CACHE_ERROR (possibly a software method),
 * DMA_PUSHER errors and semaphore interrupts, looping until the status
 * register clears or 100 iterations have passed, after which PFIFO
 * interrupts are masked entirely.
 */
void
nv04_fifo_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_device *device = nv_device(subdev);
	struct nv04_fifo_priv *priv = (void *)subdev;
	uint32_t status, reassign;
	int cnt = 0;

	reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		/* suspend cache reassignment while we poke at CACHE1;
		 * restored at the bottom of the loop */
		nv_wr32(priv, NV03_PFIFO_CACHES, 0);

		chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
		get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			if (device->card_type < NV_40) {
				mthd = nv_rd32(priv,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(priv,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(priv,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(priv,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
				nv_info(priv, "CACHE_ERROR - Ch %d/%d "
					      "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			/* drop the faulting entry: stop the pusher, ack
			 * the interrupt, skip GET past the bad entry with
			 * the fetcher paused, then restart everything */
			nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(priv, NV03_PFIFO_INTR_0,
						NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			/* raw offsets: 0x3244/0x3240 are CACHE1 DMA_GET/
			 * DMA_PUT, 0x3220 DMA_PUSH, 0x3228 DMA_STATE
			 * (see nv04.h) */
			u32 dma_get = nv_rd32(priv, 0x003244);
			u32 dma_put = nv_rd32(priv, 0x003240);
			u32 push = nv_rd32(priv, 0x003220);
			u32 state = nv_rd32(priv, 0x003228);

			if (device->card_type == NV_50) {
				/* nv50 splits GET/PUT into low and
				 * high-order words and adds an indirect
				 * buffer (ib) get/put pair */
				u32 ho_get = nv_rd32(priv, 0x003328);
				u32 ho_put = nv_rd32(priv, 0x003320);
				u32 ib_get = nv_rd32(priv, 0x003334);
				u32 ib_put = nv_rd32(priv, 0x003330);

				nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
				     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
				     "State 0x%08x (err: %s) Push 0x%08x\n",
					chid, ho_get, dma_get, ho_put,
					dma_put, ib_get, ib_put, state,
					nv_dma_state_err(state),
					push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(priv, 0x003364, 0x00000000);
				/* skip the faulting data: advance GET to
				 * PUT, or the IB pointer if data matched */
				if (dma_get != dma_put || ho_get != ho_put) {
					nv_wr32(priv, 0x003244, dma_put);
					nv_wr32(priv, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(priv, 0x003334, ib_put);
				}
			} else {
				nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
					chid, dma_get, dma_put, state,
					nv_dma_state_err(state), push);

				if (dma_get != dma_put)
					nv_wr32(priv, 0x003244, dma_put);
			}

			/* clear DMA_STATE, re-enable the pusher, ack */
			nv_wr32(priv, 0x003228, 0x00000000);
			nv_wr32(priv, 0x003220, 0x00000001);
			nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(priv, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			/* skip the semaphore method and restart the puller */
			nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (device->card_type == NV_50) {
			/* bit 4 on nv50: forwarded to the PFB trap handler */
			if (status & 0x00000010) {
				nv50_fb_trap(nouveau_fb(priv), 1);
				status &= ~0x00000010;
				nv_wr32(priv, 0x002100, 0x00000010);
			}
		}

		if (status) {
			nv_info(priv, "unknown intr 0x%08x, ch %d\n",
				status, chid);
			nv_wr32(priv, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		/* still asserting after 100 spins: mask PFIFO interrupts
		 * (0x2140 = NV03_PFIFO_INTR_EN_0; 0x140 is presumably the
		 * PMC master interrupt enable — TODO confirm) */
		nv_info(priv, "still angry after %d spins, halt\n", cnt);
		nv_wr32(priv, 0x002140, 0);
		nv_wr32(priv, 0x000140, 0);
	}

	/* NOTE(review): 0x100 write looks like a PMC-level ack of the
	 * PFIFO pending bit (matches unit = 0x00000100 in the ctor) */
	nv_wr32(priv, 0x000100, 0x00000100);
}
552
/* Construct the NV04 PFIFO engine: channel ids 0..15, sharing the
 * RAMHT/RAMRO/RAMFC objects owned by the nv04 instmem subdev.
 */
static int
nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv04_instmem_priv *imem = nv04_instmem(parent);
	struct nv04_fifo_priv *priv;
	int ret;

	ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* take references on the instmem objects; dropped in the dtor */
	nouveau_ramht_ref(imem->ramht, &priv->ramht);
	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);

	nv_subdev(priv)->unit = 0x00000100;	/* interrupt/unit mask bit */
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv04_fifo_cclass;
	nv_engine(priv)->sclass = nv04_fifo_sclass;
	priv->base.pause = nv04_fifo_pause;
	priv->base.start = nv04_fifo_start;
	priv->ramfc_desc = nv04_ramfc;
	return 0;
}
580
/* Destroy the PFIFO engine: drop the instmem references taken in the
 * constructor, then tear down the base fifo object. */
void
nv04_fifo_dtor(struct nouveau_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	nouveau_gpuobj_ref(NULL, &priv->ramfc);
	nouveau_gpuobj_ref(NULL, &priv->ramro);
	nouveau_ramht_ref(NULL, &priv->ramht);
	nouveau_fifo_destroy(&priv->base);
}
590
/* Bring up PFIFO: program delay/timeslice, point the hardware at
 * RAMHT/RAMRO/RAMFC, ack and enable all interrupts, then enable the
 * pusher, puller and cache reassignment.
 */
int
nv04_fifo_init(struct nouveau_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	int ret;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	/* RAMHT: search granularity, table size (from ->bits) and base */
	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((priv->ramht->bits - 9) << 16) |
				        (priv->ramht->base.addr >> 8));
	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);

	/* park CACHE1 on the last channel id — TODO confirm intent */
	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);

	/* ack anything pending, then enable all interrupt sources */
	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	return 0;
}
620
/* Engine class registered with the device for NV04 PFIFO. */
struct nouveau_oclass
nv04_fifo_oclass = {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
new file mode 100644
index 000000000000..496a4b4fdfaf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
@@ -0,0 +1,178 @@
1#ifndef __NV04_FIFO_H__
2#define __NV04_FIFO_H__
3
4#include <engine/fifo.h>
5
6#define NV04_PFIFO_DELAY_0 0x00002040
7#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
8#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
9#define NV03_PFIFO_INTR_0 0x00002100
10#define NV03_PFIFO_INTR_EN_0 0x00002140
11# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
12# define NV_PFIFO_INTR_RUNOUT (1<<4)
13# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
14# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
15# define NV_PFIFO_INTR_DMA_PT (1<<16)
16# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
17# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
18#define NV03_PFIFO_RAMHT 0x00002210
19#define NV03_PFIFO_RAMFC 0x00002214
20#define NV03_PFIFO_RAMRO 0x00002218
21#define NV40_PFIFO_RAMFC 0x00002220
22#define NV03_PFIFO_CACHES 0x00002500
23#define NV04_PFIFO_MODE 0x00002504
24#define NV04_PFIFO_DMA 0x00002508
25#define NV04_PFIFO_SIZE 0x0000250c
26#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
27#define NV50_PFIFO_CTX_TABLE__SIZE 128
28#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
29#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
30#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
31#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
32#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
33#define NV03_PFIFO_CACHE0_PULL0 0x00003040
34#define NV04_PFIFO_CACHE0_PULL0 0x00003050
35#define NV04_PFIFO_CACHE0_PULL1 0x00003054
36#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
37#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
38#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
39#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
40#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
41#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
42#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
43#define NV03_PFIFO_CACHE1_PUT 0x00003210
44#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
45#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
46# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
47# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
48# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
49# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
50# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
51# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
52# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
53# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
54# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
55# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
56# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
57# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
58# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
59# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
60# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
61# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
62# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
63# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
64# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
65# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
66# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
67# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
68# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
69# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
70# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
71# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
72# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
73# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
74# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
75# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
76# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
77# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
78# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
79# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
80# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
81# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
82# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
83# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
84# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
85# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
86# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
87# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
88# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
89# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
90# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
91# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
92# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
93# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
94# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
95# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
96# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
97# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
98# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
99# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
100# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
101# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
102# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
103# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
104# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
105# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
106# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
107#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
108#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
109#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
110#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
111#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
112#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
113#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
114#define NV03_PFIFO_CACHE1_PULL0 0x00003240
115#define NV04_PFIFO_CACHE1_PULL0 0x00003250
116# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
117# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
118#define NV03_PFIFO_CACHE1_PULL1 0x00003250
119#define NV04_PFIFO_CACHE1_PULL1 0x00003254
120#define NV04_PFIFO_CACHE1_HASH 0x00003258
121#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
122#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
123#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
124#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
125#define NV03_PFIFO_CACHE1_GET 0x00003270
126#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
127#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
128#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
129#define NV40_PFIFO_UNK32E4 0x000032E4
130#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
131#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
132#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
133#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
134
/* One entry of a chipset's RAMFC layout table (terminated by a zeroed
 * entry).  NOTE(review): field meanings inferred from the nvXX_ramfc
 * tables — a "bits"-wide field at bit "ctxs" of RAMFC byte offset
 * "ctxp", mirrored at bit "regs" of PFIFO register "regp"; confirm
 * against the channel save/restore code. */
struct ramfc_desc {
	unsigned bits:6;
	unsigned ctxs:5;
	unsigned ctxp:8;
	unsigned regs:5;
	unsigned regp;
};

/* PFIFO engine state shared by the nv04-family implementations. */
struct nv04_fifo_priv {
	struct nouveau_fifo base;
	struct ramfc_desc *ramfc_desc;	/* chipset's RAMFC layout table */
	struct nouveau_ramht *ramht;	/* referenced from nv04 instmem */
	struct nouveau_gpuobj *ramro;	/* referenced from nv04 instmem */
	struct nouveau_gpuobj *ramfc;	/* referenced from nv04 instmem */
};

/* Per-channel FIFO context (just the instmem reservation). */
struct nv04_fifo_base {
	struct nouveau_fifo_base base;
};

/* DMA channel state. */
struct nv04_fifo_chan {
	struct nouveau_fifo_chan base;
	u32 subc[8];	/* object handle bound to each subchannel (sw methods) */
	u32 ramfc;	/* byte offset of this channel's RAMFC entry */
};
160
/* Channel object/context hooks implemented in nv04.c and reused by the
 * nv10/nv17 channel constructors. */
int nv04_fifo_object_attach(struct nouveau_object *,
			    struct nouveau_object *, u32);
void nv04_fifo_object_detach(struct nouveau_object *, int);

void nv04_fifo_chan_dtor(struct nouveau_object *);
int  nv04_fifo_chan_init(struct nouveau_object *);
int  nv04_fifo_chan_fini(struct nouveau_object *, bool suspend);

/* Shared FIFO context constructor (see nv04.c). */
int nv04_fifo_context_ctor(struct nouveau_object *, struct nouveau_object *,
			   struct nouveau_oclass *, void *, u32,
			   struct nouveau_object **);

/* Engine-level hooks shared across nv04-family PFIFOs. */
void nv04_fifo_dtor(struct nouveau_object *);
int  nv04_fifo_init(struct nouveau_object *);
void nv04_fifo_pause(struct nouveau_fifo *, unsigned long *);
void nv04_fifo_start(struct nouveau_fifo *, unsigned long *);
177
178#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
new file mode 100644
index 000000000000..4ba75422b89d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/ramht.h>
29
30#include <subdev/instmem.h>
31#include <subdev/instmem/nv04.h>
32#include <subdev/fb.h>
33
34#include <engine/fifo.h>
35
36#include "nv04.h"
37
/* NV10 RAMFC layout: per-channel CACHE1 register save area, 32 bytes
 * per channel (see nv10_fifo_chan_ctor: chid * 32). */
static struct ramfc_desc
nv10_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
	{ 16,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};
51
52/*******************************************************************************
53 * FIFO channel objects
54 ******************************************************************************/
55
/* Create an NV10 DMA channel: allocate the generic channel object and
 * initialise this channel's 32-byte RAMFC entry directly in instmem.
 */
static int
nv10_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	struct nv03_channel_dma_class *args = data;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	/* 0x800000/0x10000 args: presumably user-area base and
	 * per-channel stride — confirm against nouveau_fifo_channel_create;
	 * bitmask lists the engines this channel may use */
	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
					  0x10000, args->pushbuf,
					  (1 << NVDEV_ENGINE_DMAOBJ) |
					  (1 << NVDEV_ENGINE_SW) |
					  (1 << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	/* RAMFC: DMA_PUT/DMA_GET at the requested offset, push buffer
	 * instance, and default fetch parameters (layout per nv10_ramfc) */
	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
		NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
		NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
		NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
		NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}
96
/* Channel object functions: nv10-specific ctor, everything else reuses
 * the nv04 implementations. */
static struct nouveau_ofuncs
nv10_fifo_ofuncs = {
	.ctor = nv10_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};

/* Channel classes userspace may create on an NV10 PFIFO. */
static struct nouveau_oclass
nv10_fifo_sclass[] = {
	{ NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs },
	{}
};
112
113/*******************************************************************************
114 * FIFO context - basically just the instmem reserved for the channel
115 ******************************************************************************/
116
/* FIFO context class: identical to nv04 apart from the handle. */
static struct nouveau_oclass
nv10_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x10),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nouveau_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
129
130/*******************************************************************************
131 * PFIFO engine
132 ******************************************************************************/
133
/* Construct the NV10 PFIFO engine: channel ids 0..31, otherwise the
 * same wiring as nv04 with the nv10 class tables and RAMFC layout.
 */
static int
nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv04_instmem_priv *imem = nv04_instmem(parent);
	struct nv04_fifo_priv *priv;
	int ret;

	ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* take references on the instmem objects; dropped in nv04_fifo_dtor */
	nouveau_ramht_ref(imem->ramht, &priv->ramht);
	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv10_fifo_cclass;
	nv_engine(priv)->sclass = nv10_fifo_sclass;
	priv->base.pause = nv04_fifo_pause;
	priv->base.start = nv04_fifo_start;
	priv->ramfc_desc = nv10_ramfc;
	return 0;
}
161
/* Engine class for NV10 PFIFO; dtor/init are shared with nv04. */
struct nouveau_oclass
nv10_fifo_oclass = {
	.handle = NV_ENGINE(FIFO, 0x10),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv10_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
new file mode 100644
index 000000000000..b96e6b0ae2b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -0,0 +1,208 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/ramht.h>
29
30#include <subdev/instmem.h>
31#include <subdev/instmem/nv04.h>
32#include <subdev/fb.h>
33
34#include <engine/fifo.h>
35
36#include "nv04.h"
37
/* NV17 RAMFC layout: extends nv10's with acquire/semaphore/subroutine
 * state, 64 bytes per channel (see nv17_fifo_chan_ctor: chid * 64). */
static struct ramfc_desc
nv17_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
	{ 16,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_PULL1 },
	{ 32,  0, 0x20,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
	{}
};
56
57/*******************************************************************************
58 * FIFO channel objects
59 ******************************************************************************/
60
/* Create an NV17 DMA channel: like nv10 but also allows the MPEG
 * engine (NV31+) and uses the larger 64-byte RAMFC entry.
 */
static int
nv17_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	struct nv03_channel_dma_class *args = data;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	/* 0x800000/0x10000 args: presumably user-area base and
	 * per-channel stride — confirm against nouveau_fifo_channel_create */
	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
					  0x10000, args->pushbuf,
					  (1 << NVDEV_ENGINE_DMAOBJ) |
					  (1 << NVDEV_ENGINE_SW) |
					  (1 << NVDEV_ENGINE_GR) |
					  (1 << NVDEV_ENGINE_MPEG), /* NV31- */
					  &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 64;

	/* RAMFC: DMA_PUT/DMA_GET at the requested offset, push buffer
	 * instance, and default fetch parameters (layout per nv17_ramfc) */
	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
		NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
		NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
		NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
		NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}
103
/* Channel object functions: nv17-specific ctor, everything else reuses
 * the nv04 implementations. */
static struct nouveau_ofuncs
nv17_fifo_ofuncs = {
	.ctor = nv17_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};

/* Channel classes userspace may create on an NV17 PFIFO. */
static struct nouveau_oclass
nv17_fifo_sclass[] = {
	{ NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs },
	{}
};
119
120/*******************************************************************************
121 * FIFO context - basically just the instmem reserved for the channel
122 ******************************************************************************/
123
/* FIFO context class: identical to nv04 apart from the handle. */
static struct nouveau_oclass
nv17_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x17),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nouveau_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
136
137/*******************************************************************************
138 * PFIFO engine
139 ******************************************************************************/
140
/* Construct the NV17 PFIFO engine: channel ids 0..31, nv17 class
 * tables and RAMFC layout, shared nv04 pause/start/intr handlers.
 */
static int
nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv04_instmem_priv *imem = nv04_instmem(parent);
	struct nv04_fifo_priv *priv;
	int ret;

	ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* take references on the instmem objects; dropped in nv04_fifo_dtor */
	nouveau_ramht_ref(imem->ramht, &priv->ramht);
	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv17_fifo_cclass;
	nv_engine(priv)->sclass = nv17_fifo_sclass;
	priv->base.pause = nv04_fifo_pause;
	priv->base.start = nv04_fifo_start;
	priv->ramfc_desc = nv17_ramfc;
	return 0;
}
168
/* Bring up the NV17 PFIFO.  Same sequence as nv04_fifo_init except the
 * RAMFC register additionally ORs in 0x00010000 — presumably selecting
 * the larger nv17 RAMFC format; confirm against hw documentation.
 */
static int
nv17_fifo_init(struct nouveau_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	int ret;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	/* RAMHT: search granularity, table size (from ->bits) and base */
	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((priv->ramht->bits - 9) << 16) |
				        (priv->ramht->base.addr >> 8));
	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);

	/* park CACHE1 on the last channel id — TODO confirm intent */
	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);

	/* ack anything pending, then enable all interrupt sources */
	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	return 0;
}
198
/* Engine class for NV17 PFIFO; dtor is shared with nv04, init is the
 * nv17 variant above. */
struct nouveau_oclass
nv17_fifo_oclass = {
	.handle = NV_ENGINE(FIFO, 0x17),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv17_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv17_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
new file mode 100644
index 000000000000..559c3b4e1b86
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -0,0 +1,349 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/ramht.h>
29
30#include <subdev/instmem.h>
31#include <subdev/instmem/nv04.h>
32#include <subdev/fb.h>
33
34#include <engine/fifo.h>
35
36#include "nv04.h"
37
/* Per-channel fifo context (RAMFC) layout for NV40: each entry maps a
 * bitfield at a RAMFC offset to the PFIFO register it shadows while the
 * channel is not resident on the hardware.
 * NOTE(review): field order assumed to be { bits, ctxs, ctxp, regs,
 * regp } per struct ramfc_desc in nv04.h — confirm against that header.
 */
static struct ramfc_desc
nv40_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 28,  0, 0x18,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{  2, 28, 0x18, 28, 0x002058 },
	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x20,  0, NV04_PFIFO_CACHE1_PULL1 },
	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
	{ 32,  0, 0x34,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
	{ 32,  0, 0x38,  0, NV40_PFIFO_GRCTX_INSTANCE },
	{ 17,  0, 0x3c,  0, NV04_PFIFO_DMA_TIMESLICE },
	{ 32,  0, 0x40,  0, 0x0032e4 },
	{ 32,  0, 0x44,  0, 0x0032e8 },
	{ 32,  0, 0x4c,  0, 0x002088 },
	{ 32,  0, 0x50,  0, 0x003300 },
	{ 32,  0, 0x54,  0, 0x00330c },
	{}
};
64
65/*******************************************************************************
66 * FIFO channel objects
67 ******************************************************************************/
68
69static int
70nv40_fifo_object_attach(struct nouveau_object *parent,
71 struct nouveau_object *object, u32 handle)
72{
73 struct nv04_fifo_priv *priv = (void *)parent->engine;
74 struct nv04_fifo_chan *chan = (void *)parent;
75 u32 context, chid = chan->base.chid;
76 int ret;
77
78 if (nv_iclass(object, NV_GPUOBJ_CLASS))
79 context = nv_gpuobj(object)->addr >> 4;
80 else
81 context = 0x00000004; /* just non-zero */
82
83 switch (nv_engidx(object->engine)) {
84 case NVDEV_ENGINE_DMAOBJ:
85 case NVDEV_ENGINE_SW:
86 context |= 0x00000000;
87 break;
88 case NVDEV_ENGINE_GR:
89 context |= 0x00100000;
90 break;
91 case NVDEV_ENGINE_MPEG:
92 context |= 0x00200000;
93 break;
94 default:
95 return -EINVAL;
96 }
97
98 context |= chid << 23;
99
100 mutex_lock(&nv_subdev(priv)->mutex);
101 ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
102 mutex_unlock(&nv_subdev(priv)->mutex);
103 return ret;
104}
105
/* Bind an engine context (GR or MPEG) to a channel: record its instance
 * address in the channel's RAMFC, and also poke it straight into the
 * engine's context register when this channel is resident on the hw.
 */
static int
nv40_fifo_context_attach(struct nouveau_object *parent,
			 struct nouveau_object *engctx)
{
	struct nv04_fifo_priv *priv = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	unsigned long flags;
	u32 reg, ctx;

	switch (nv_engidx(engctx->engine)) {
	case NVDEV_ENGINE_SW:
		/* software "engine" has no hardware context */
		return 0;
	case NVDEV_ENGINE_GR:
		reg = 0x32e0;
		ctx = 0x38;
		break;
	case NVDEV_ENGINE_MPEG:
		reg = 0x330c;
		ctx = 0x54;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&priv->base.lock, flags);
	nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
	/* clear 0x002500 bit 0 while modifying state, restored below */
	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);

	/* 0x003204 presumably holds the currently-active channel id;
	 * if it's us, update the live engine register too */
	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
		nv_wr32(priv, reg, nv_engctx(engctx)->addr);
	nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);

	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return 0;
}
142
/* Reverse of nv40_fifo_context_attach: zero the engine context pointer
 * in RAMFC (and in the live engine register if this channel is active).
 * The 'suspend' flag is unused here but kept for interface parity.
 */
static int
nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend,
			 struct nouveau_object *engctx)
{
	struct nv04_fifo_priv *priv = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	unsigned long flags;
	u32 reg, ctx;

	switch (nv_engidx(engctx->engine)) {
	case NVDEV_ENGINE_SW:
		return 0;
	case NVDEV_ENGINE_GR:
		reg = 0x32e0;
		ctx = 0x38;
		break;
	case NVDEV_ENGINE_MPEG:
		reg = 0x330c;
		ctx = 0x54;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&priv->base.lock, flags);
	/* clear 0x002500 bit 0 while modifying state, restored below */
	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);

	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
		nv_wr32(priv, reg, 0x00000000);
	nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);

	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return 0;
}
178
/* Create an NV40 DMA fifo channel and fill in its 128-byte RAMFC slot.
 * The offsets written below correspond to the registers listed in the
 * nv40_ramfc[] table above (0x00/0x04 DMA put/get, 0x0c pushbuf
 * instance, 0x18 fetch config, 0x3c timeslice).
 */
static int
nv40_fifo_chan_ctor(struct nouveau_object *parent,
		    struct nouveau_object *engine,
		    struct nouveau_oclass *oclass, void *data, u32 size,
		    struct nouveau_object **pobject)
{
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	struct nv03_channel_dma_class *args = data;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
					  0x1000, args->pushbuf,
					  (1 << NVDEV_ENGINE_DMAOBJ) |
					  (1 << NVDEV_ENGINE_SW) |
					  (1 << NVDEV_ENGINE_GR) |
					  (1 << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = nv40_fifo_context_attach;
	nv_parent(chan)->context_detach = nv40_fifo_context_detach;
	nv_parent(chan)->object_attach = nv40_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	/* 128 bytes of RAMFC per channel on nv40 */
	chan->ramfc = chan->base.chid * 128;

	/* DMA put/get both start at the requested pushbuf offset */
	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
	return 0;
}
222
/* Object functions for NV40 DMA channels; only the constructor differs
 * from the NV04 channel implementation. */
static struct nouveau_ofuncs
nv40_fifo_ofuncs = {
	.ctor = nv40_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};
232
/* Channel classes exposed by the NV40 PFIFO engine. */
static struct nouveau_oclass
nv40_fifo_sclass[] = {
	{ NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs },
	{}
};
238
239/*******************************************************************************
240 * FIFO context - basically just the instmem reserved for the channel
241 ******************************************************************************/
242
/* FIFO context class — just the instmem reserved for a channel;
 * reuses the generic NV04 context implementation. */
static struct nouveau_oclass
nv40_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nouveau_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
255
256/*******************************************************************************
257 * PFIFO engine
258 ******************************************************************************/
259
/* Construct the NV40 PFIFO engine object: 32 channels (0-31), taking
 * references on the RAMHT/RAMRO/RAMFC structures carved out by the
 * NV04 instmem subdev.
 */
static int
nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv04_instmem_priv *imem = nv04_instmem(parent);
	struct nv04_fifo_priv *priv;
	int ret;

	ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nouveau_ramht_ref(imem->ramht, &priv->ramht);
	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);

	/* NOTE(review): 0x00000100 presumably the PMC enable bit for
	 * PFIFO — confirm */
	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv40_fifo_cclass;
	nv_engine(priv)->sclass = nv40_fifo_sclass;
	priv->base.pause = nv04_fifo_pause;
	priv->base.start = nv04_fifo_start;
	priv->ramfc_desc = nv40_ramfc;
	return 0;
}
287
/* Bring up the NV40 PFIFO engine.  RAMFC placement is chipset-
 * dependent, hence the switch below.
 */
static int
nv40_fifo_init(struct nouveau_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	struct nouveau_fb *pfb = nouveau_fb(object);
	int ret;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x002040, 0x000000ff);
	nv_wr32(priv, 0x002044, 0x2101ffff);
	nv_wr32(priv, 0x002058, 0x00000001);

	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((priv->ramht->bits - 9) << 16) |
				        (priv->ramht->base.addr >> 8));
	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);

	switch (nv_device(priv)->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		nv_wr32(priv, 0x002230, 0x00000001);
		/* fallthrough - these chipsets also need the 0x002220
		 * setting shared with the group below */
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x48:
		nv_wr32(priv, 0x002220, 0x00030002);
		break;
	default:
		nv_wr32(priv, 0x002230, 0x00000000);
		/* NOTE(review): RAMFC appears to be addressed relative to
		 * the top 512KiB of vram on these chipsets — confirm */
		nv_wr32(priv, 0x002220, ((pfb->ram.size - 512 * 1024 +
					 priv->ramfc->addr) >> 16) |
					0x00030000);
		break;
	}

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);

	/* ack pending interrupts, enable all of them */
	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	return 0;
}
339
/* NV40 PFIFO engine class; teardown shared with NV04. */
struct nouveau_oclass
nv40_fifo_oclass = {
	.handle = NV_ENGINE(FIFO, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv40_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
new file mode 100644
index 000000000000..536e7634a00d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -0,0 +1,502 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/client.h>
26#include <core/engctx.h>
27#include <core/ramht.h>
28#include <core/class.h>
29#include <core/math.h>
30
31#include <subdev/timer.h>
32#include <subdev/bar.h>
33
34#include <engine/dmaobj.h>
35#include <engine/fifo.h>
36
37#include "nv50.h"
38
39/*******************************************************************************
40 * FIFO channel objects
41 ******************************************************************************/
42
/* Rebuild the hardware playlist from the set of enabled channels and
 * point PFIFO at it.  Double-buffered: alternates between the two
 * playlist gpuobjs so the hw never reads a half-written list.
 */
void
nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
{
	struct nouveau_bar *bar = nouveau_bar(priv);
	struct nouveau_gpuobj *cur;
	int i, p;

	cur = priv->playlist[priv->cur_playlist];
	priv->cur_playlist = !priv->cur_playlist;

	/* collect every channel whose context entry is marked valid
	 * (bit 31 of 0x002600 + chid*4) */
	for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
		if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
			nv_wo32(cur, p++ * 4, i);
	}

	/* flush writes to vram before handing the list to the hw */
	bar->flush(bar);

	nv_wr32(priv, 0x0032f4, cur->addr >> 12);
	nv_wr32(priv, 0x0032ec, p);	/* number of playlist entries */
	nv_wr32(priv, 0x002500, 0x00000101);
}
64
/* Attach an engine context to a channel by filling the per-engine slot
 * (GR at +0x00, MPEG at +0x60) in the channel's engine-context table
 * with the context's address range.
 */
static int
nv50_fifo_context_attach(struct nouveau_object *parent,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nouveau_gpuobj *ectx = (void *)object;
	u64 limit = ectx->addr + ectx->size - 1;
	u64 start = ectx->addr;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;	/* no hw context */
	case NVDEV_ENGINE_GR    : addr = 0x0000; break;
	case NVDEV_ENGINE_MPEG  : addr = 0x0060; break;
	default:
		return -EINVAL;
	}

	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	/* context descriptor: flags, limit, start, and the high bits of
	 * both packed into one word */
	nv_wo32(base->eng, addr + 0x00, 0x00190000);
	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
					upper_32_bits(start));
	nv_wo32(base->eng, addr + 0x10, 0x00000000);
	nv_wo32(base->eng, addr + 0x14, 0x00000000);
	bar->flush(bar);
	return 0;
}
95
/* Detach an engine context: zero the channel's engine-context slot and
 * kick the channel off the hardware so the engine drops the context.
 * Returns -EBUSY on unload timeout during suspend (caller must not
 * proceed); outside suspend the timeout is logged but tolerated.
 */
static int
nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_fifo_priv *priv = (void *)parent->engine;
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nv50_fifo_chan *chan = (void *)parent;
	u32 addr, me;
	int ret = 0;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0000; break;
	case NVDEV_ENGINE_MPEG  : addr = 0x0060; break;
	default:
		return -EINVAL;
	}

	/* wipe the context descriptor written by context_attach */
	nv_wo32(base->eng, addr + 0x00, 0x00000000);
	nv_wo32(base->eng, addr + 0x04, 0x00000000);
	nv_wo32(base->eng, addr + 0x08, 0x00000000);
	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
	nv_wo32(base->eng, addr + 0x10, 0x00000000);
	nv_wo32(base->eng, addr + 0x14, 0x00000000);
	bar->flush(bar);

	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
	 * that they've processed the context switch request.
	 *
	 * In order for the kickoff to work, we need to ensure all the
	 * connected engines are in a state where they can answer.
	 *
	 * Newer chipsets don't seem to suffer from this issue, and well,
	 * there's also a "ignore these engines" bitmask reg we can use
	 * if we hit the issue there..
	 */
	me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff... */
	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
	if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
		nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
		if (suspend)
			ret = -EBUSY;
	}

	/* restore the previous 0x00b860 state */
	nv_wr32(priv, 0x00b860, me);
	return ret;
}
148
149static int
150nv50_fifo_object_attach(struct nouveau_object *parent,
151 struct nouveau_object *object, u32 handle)
152{
153 struct nv50_fifo_chan *chan = (void *)parent;
154 u32 context;
155
156 if (nv_iclass(object, NV_GPUOBJ_CLASS))
157 context = nv_gpuobj(object)->node->offset >> 4;
158 else
159 context = 0x00000004; /* just non-zero */
160
161 switch (nv_engidx(object->engine)) {
162 case NVDEV_ENGINE_DMAOBJ:
163 case NVDEV_ENGINE_SW : context |= 0x00000000; break;
164 case NVDEV_ENGINE_GR : context |= 0x00100000; break;
165 case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
166 default:
167 return -EINVAL;
168 }
169
170 return nouveau_ramht_insert(chan->ramht, 0, handle, context);
171}
172
173void
174nv50_fifo_object_detach(struct nouveau_object *parent, int cookie)
175{
176 struct nv50_fifo_chan *chan = (void *)parent;
177 nouveau_ramht_remove(chan->ramht, cookie);
178}
179
/* Create an NV50 DMA-mode fifo channel: allocate the channel and its
 * private RAMHT, then fill the RAMFC in the channel's context object.
 */
static int
nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	struct nv03_channel_dma_class *args = data;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
					  0x2000, args->pushbuf,
					  (1 << NVDEV_ENGINE_DMAOBJ) |
					  (1 << NVDEV_ENGINE_SW) |
					  (1 << NVDEV_ENGINE_GR) |
					  (1 << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	/* per-channel hash table for object handles */
	ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
	if (ret)
		return ret;

	/* NOTE(review): 0x08-0x14 presumably DMA put/get pointers, both
	 * starting at the requested pushbuf offset — confirm */
	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
	nv_wo32(base->ramfc, 0x44, 0x01003fff);
	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
	nv_wo32(base->ramfc, 0x78, 0x00000000);
	nv_wo32(base->ramfc, 0x7c, 0x30000001);
	/* RAMHT size/offset config, mirroring the NV04-era RAMHT word */
	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->base.node->offset >> 4));
	bar->flush(bar);
	return 0;
}
231
/* Create an NV50 indirect-mode (gpfifo/IB) channel: like the DMA ctor,
 * but the RAMFC is programmed with the IB ring's offset and log2 length
 * instead of put/get pointers.
 */
static int
nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv50_channel_ind_class *args = data;
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	u64 ioffset, ilength;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
					  0x2000, args->pushbuf,
					  (1 << NVDEV_ENGINE_DMAOBJ) |
					  (1 << NVDEV_ENGINE_SW) |
					  (1 << NVDEV_ENGINE_GR) |
					  (1 << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	/* per-channel hash table for object handles */
	ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
	if (ret)
		return ret;

	/* IB ring length is stored as log2 of the entry count */
	ioffset = args->ioffset;
	ilength = log2i(args->ilength / 8);

	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
	nv_wo32(base->ramfc, 0x44, 0x01003fff);
	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
	nv_wo32(base->ramfc, 0x78, 0x00000000);
	nv_wo32(base->ramfc, 0x7c, 0x30000001);
	/* RAMHT size/offset config, mirroring the NV04-era RAMHT word */
	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->base.node->offset >> 4));
	bar->flush(bar);
	return 0;
}
284
/* Destroy an NV50 channel: drop the RAMHT reference first, then tear
 * down the base channel object. */
void
nv50_fifo_chan_dtor(struct nouveau_object *object)
{
	struct nv50_fifo_chan *chan = (void *)object;
	nouveau_ramht_ref(NULL, &chan->ramht);
	nouveau_fifo_channel_destroy(&chan->base);
}
292
/* Activate a channel: register its RAMFC address (with the valid bit,
 * 0x80000000) in the per-channel context table, then regenerate the
 * playlist so the hw starts scheduling it. */
static int
nv50_fifo_chan_init(struct nouveau_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object->engine;
	struct nv50_fifo_base *base = (void *)object->parent;
	struct nv50_fifo_chan *chan = (void *)object;
	struct nouveau_gpuobj *ramfc = base->ramfc;
	u32 chid = chan->base.chid;
	int ret;

	ret = nouveau_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
	nv50_fifo_playlist_update(priv);
	return 0;
}
311
/* Deactivate a channel: clear its valid bit, let the playlist update
 * trigger a context unload, then wipe the context-table entry. */
int
nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_fifo_priv *priv = (void *)object->engine;
	struct nv50_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;

	/* remove channel from playlist, fifo will unload context */
	nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nv50_fifo_playlist_update(priv);
	nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);

	return nouveau_fifo_channel_fini(&chan->base, suspend);
}
326
/* Object functions for NV50 DMA-mode channels. */
static struct nouveau_ofuncs
nv50_fifo_ofuncs_dma = {
	.ctor = nv50_fifo_chan_ctor_dma,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};
336
/* Object functions for NV50 indirect (gpfifo) channels; only the
 * constructor differs from the DMA variant. */
static struct nouveau_ofuncs
nv50_fifo_ofuncs_ind = {
	.ctor = nv50_fifo_chan_ctor_ind,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};
346
/* Channel classes exposed by the NV50 PFIFO engine. */
static struct nouveau_oclass
nv50_fifo_sclass[] = {
	{ NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma },
	{ NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind },
	{}
};
353
354/*******************************************************************************
355 * FIFO context - basically just the instmem reserved for the channel
356 ******************************************************************************/
357
/* Construct a channel's fifo context: the 64KiB instmem block plus the
 * RAMFC, engine-context table, page directory and VM reference carved
 * out of it.  Partially-constructed state on failure is cleaned up by
 * nv50_fifo_context_dtor via the object framework.
 */
static int
nv50_fifo_context_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nv50_fifo_base *base;
	int ret;

	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
					  0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	/* 0x200-byte RAMFC */
	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
	if (ret)
		return ret;

	/* per-engine context descriptor table */
	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
	if (ret)
		return ret;

	/* page directory for the channel's VM */
	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0,
				 &base->pgd);
	if (ret)
		return ret;

	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}
394
/* Tear down a channel's fifo context; the VM reference is dropped
 * before its page directory. */
void
nv50_fifo_context_dtor(struct nouveau_object *object)
{
	struct nv50_fifo_base *base = (void *)object;
	nouveau_vm_ref(NULL, &base->vm, base->pgd);
	nouveau_gpuobj_ref(NULL, &base->pgd);
	nouveau_gpuobj_ref(NULL, &base->eng);
	nouveau_gpuobj_ref(NULL, &base->ramfc);
	nouveau_gpuobj_ref(NULL, &base->cache);
	nouveau_fifo_context_destroy(&base->base);
}
406
/* FIFO context class — the instmem reserved for a channel. */
static struct nouveau_oclass
nv50_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_fifo_context_ctor,
		.dtor = nv50_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
419
420/*******************************************************************************
421 * PFIFO engine
422 ******************************************************************************/
423
/* Construct the NV50 PFIFO engine: channels 1-127 (0 is reserved) and
 * the two gpuobjs used for double-buffering the playlist. */
static int
nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv50_fifo_priv *priv;
	int ret;

	ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* two playlist buffers of 128 32-bit entries each */
	ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
				 &priv->playlist[0]);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
				 &priv->playlist[1]);
	if (ret)
		return ret;

	/* NOTE(review): 0x00000100 presumably the PMC enable bit for
	 * PFIFO — confirm */
	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv50_fifo_cclass;
	nv_engine(priv)->sclass = nv50_fifo_sclass;
	return 0;
}
453
/* Destroy the NV50 PFIFO engine: release both playlist buffers and the
 * base fifo object. */
void
nv50_fifo_dtor(struct nouveau_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object;

	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);

	nouveau_fifo_destroy(&priv->base);
}
464
/* Bring up the NV50 PFIFO engine: reset it via the enable register,
 * clear all channel context entries, and publish an (empty) playlist.
 */
int
nv50_fifo_init(struct nouveau_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object;
	int ret, i;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	/* pulse PFIFO's enable bit (0x100) in 0x000200 to reset it */
	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(priv, 0x00250c, 0x6f3cfc34);
	nv_wr32(priv, 0x002044, 0x01003fff);

	/* ack pending interrupts, enable all of them */
	nv_wr32(priv, 0x002100, 0xffffffff);
	nv_wr32(priv, 0x002140, 0xffffffff);

	/* invalidate all 128 per-channel context entries */
	for (i = 0; i < 128; i++)
		nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
	nv50_fifo_playlist_update(priv);

	nv_wr32(priv, 0x003200, 0x00000001);
	nv_wr32(priv, 0x003250, 0x00000001);
	nv_wr32(priv, 0x002500, 0x00000001);
	return 0;
}
492
/* NV50 PFIFO engine class. */
struct nouveau_oclass
nv50_fifo_oclass = {
	.handle = NV_ENGINE(FIFO, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_fifo_ctor,
		.dtor = nv50_fifo_dtor,
		.init = nv50_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
new file mode 100644
index 000000000000..3a9ceb315c20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
@@ -0,0 +1,36 @@
#ifndef __NV50_FIFO_H__
#define __NV50_FIFO_H__

/* NV50 PFIFO engine state. */
struct nv50_fifo_priv {
	struct nouveau_fifo base;
	struct nouveau_gpuobj *playlist[2];	/* double-buffered playlist */
	int cur_playlist;			/* index of buffer hw reads */
};

/* Per-channel fifo context (the instmem block reserved for a channel). */
struct nv50_fifo_base {
	struct nouveau_fifo_base base;
	struct nouveau_gpuobj *ramfc;	/* fifo context ram */
	struct nouveau_gpuobj *cache;
	struct nouveau_gpuobj *eng;	/* per-engine context table */
	struct nouveau_gpuobj *pgd;	/* page directory */
	struct nouveau_vm *vm;
};

/* NV50 fifo channel. */
struct nv50_fifo_chan {
	struct nouveau_fifo_chan base;
	u32 subc[8];			/* subchannel bindings */
	struct nouveau_ramht *ramht;	/* per-channel object hash table */
};

void nv50_fifo_playlist_update(struct nv50_fifo_priv *);

void nv50_fifo_object_detach(struct nouveau_object *, int);
void nv50_fifo_chan_dtor(struct nouveau_object *);
int  nv50_fifo_chan_fini(struct nouveau_object *, bool);

void nv50_fifo_context_dtor(struct nouveau_object *);

void nv50_fifo_dtor(struct nouveau_object *);
int  nv50_fifo_init(struct nouveau_object *);

#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
new file mode 100644
index 000000000000..b4fd26d8f166
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -0,0 +1,420 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/client.h>
27#include <core/engctx.h>
28#include <core/ramht.h>
29#include <core/class.h>
30#include <core/math.h>
31
32#include <subdev/timer.h>
33#include <subdev/bar.h>
34
35#include <engine/dmaobj.h>
36#include <engine/fifo.h>
37
38#include "nv50.h"
39
40/*******************************************************************************
41 * FIFO channel objects
42 ******************************************************************************/
43
/* Attach an engine context to this channel: write the engctx gpuobj's
 * start/limit addresses into the channel's per-engine context table
 * (base->eng), then flush BAR writes so the GPU sees them. */
44static int
45nv84_fifo_context_attach(struct nouveau_object *parent,
46			 struct nouveau_object *object)
47{
48	struct nouveau_bar *bar = nouveau_bar(parent);
49	struct nv50_fifo_base *base = (void *)parent->parent;
50	struct nouveau_gpuobj *ectx = (void *)object;
51	u64 limit = ectx->addr + ectx->size - 1;
52	u64 start = ectx->addr;
53	u32 addr;
54
	/* Map engine id to its slot offset in the context table; the
	 * software engine has no hardware context to attach. */
55	switch (nv_engidx(object->engine)) {
56	case NVDEV_ENGINE_SW   : return 0;
57	case NVDEV_ENGINE_GR   : addr = 0x0020; break;
58	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
59	case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
60	case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
61	default:
62		return -EINVAL;
63	}
64
65	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
66	nv_wo32(base->eng, addr + 0x00, 0x00190000);
67	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
68	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
69	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
70					upper_32_bits(start));
71	nv_wo32(base->eng, addr + 0x10, 0x00000000);
72	nv_wo32(base->eng, addr + 0x14, 0x00000000);
73	bar->flush(bar);
74	return 0;
75}
76
/* Detach an engine context: clear the channel's engine slot, then ask
 * PFIFO to unload the channel's context for that engine and wait for
 * completion.  On timeout we only fail (-EBUSY) during suspend, since
 * at runtime there is nothing better we can do. */
77static int
78nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
79			 struct nouveau_object *object)
80{
81	struct nouveau_bar *bar = nouveau_bar(parent);
82	struct nv50_fifo_priv *priv = (void *)parent->engine;
83	struct nv50_fifo_base *base = (void *)parent->parent;
84	struct nv50_fifo_chan *chan = (void *)parent;
85	u32 addr, save, engn;
86	bool done;
87
	/* engn is the hardware engine index used in the 0x002520 mask;
	 * addr is the slot offset in the context table (as in attach). */
88	switch (nv_engidx(object->engine)) {
89	case NVDEV_ENGINE_SW   : return 0;
90	case NVDEV_ENGINE_GR   : engn = 0; addr = 0x0020; break;
91	case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
92	case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break;
93	case NVDEV_ENGINE_COPY0: engn = 2; addr = 0x00c0; break;
94	default:
95		return -EINVAL;
96	}
97
98	nv_wo32(base->eng, addr + 0x00, 0x00000000);
99	nv_wo32(base->eng, addr + 0x04, 0x00000000);
100	nv_wo32(base->eng, addr + 0x08, 0x00000000);
101	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
102	nv_wo32(base->eng, addr + 0x10, 0x00000000);
103	nv_wo32(base->eng, addr + 0x14, 0x00000000);
104	bar->flush(bar);
105
	/* Select the engine, kick the channel context, then restore the
	 * previous engine-mask value regardless of the outcome. */
106	save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
107	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
108	done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
109	nv_wr32(priv, 0x002520, save);
110	if (!done) {
111		nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
112		if (suspend)
113			return -EBUSY;
114	}
115	return 0;
116}
117
/* Bind an object handle into the channel's RAMHT.  The context word
 * encodes the owning engine in bits 20+ and, for gpuobjs, the instmem
 * node offset in the low bits. */
118static int
119nv84_fifo_object_attach(struct nouveau_object *parent,
120			struct nouveau_object *object, u32 handle)
121{
122	struct nv50_fifo_chan *chan = (void *)parent;
123	u32 context;
124
125	if (nv_iclass(object, NV_GPUOBJ_CLASS))
126		context = nv_gpuobj(object)->node->offset >> 4;
127	else
128		context = 0x00000004; /* just non-zero */
129
130	switch (nv_engidx(object->engine)) {
131	case NVDEV_ENGINE_DMAOBJ:
132	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
133	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
134	case NVDEV_ENGINE_MPEG  :
135	case NVDEV_ENGINE_PPP   : context |= 0x00200000; break;
136	case NVDEV_ENGINE_ME    :
137	case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
138	case NVDEV_ENGINE_VP    : context |= 0x00400000; break;
139	case NVDEV_ENGINE_CRYPT :
140	case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
141	case NVDEV_ENGINE_BSP   : context |= 0x00600000; break;
142	default:
143		return -EINVAL;
144	}
145
146	return nouveau_ramht_insert(chan->ramht, 0, handle, context);
147}
148
/* Constructor for a DMA-mode (NV84_CHANNEL_DMA) channel: create the
 * channel object, allocate its RAMHT, install the nv84 attach/detach
 * hooks, and program RAMFC with the pushbuffer get/put pointers at
 * args->offset plus the fixed configuration words. */
149static int
150nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
151			struct nouveau_object *engine,
152			struct nouveau_oclass *oclass, void *data, u32 size,
153			struct nouveau_object **pobject)
154{
155	struct nouveau_bar *bar = nouveau_bar(parent);
156	struct nv50_fifo_base *base = (void *)parent;
157	struct nv50_fifo_chan *chan;
158	struct nv03_channel_dma_class *args = data;
159	int ret;
160
161	if (size < sizeof(*args))
162		return -EINVAL;
163
	/* The mask lists every engine a channel of this class may use. */
164	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
165					  0x2000, args->pushbuf,
166					  (1 << NVDEV_ENGINE_DMAOBJ) |
167					  (1 << NVDEV_ENGINE_SW) |
168					  (1 << NVDEV_ENGINE_GR) |
169					  (1 << NVDEV_ENGINE_MPEG) |
170					  (1 << NVDEV_ENGINE_ME) |
171					  (1 << NVDEV_ENGINE_VP) |
172					  (1 << NVDEV_ENGINE_CRYPT) |
173					  (1 << NVDEV_ENGINE_BSP) |
174					  (1 << NVDEV_ENGINE_PPP) |
175					  (1 << NVDEV_ENGINE_COPY0) |
176					  (1 << NVDEV_ENGINE_UNK1C1), &chan);
177	*pobject = nv_object(chan);
178	if (ret)
179		return ret;
180
181	ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
182	if (ret)
183		return ret;
184
185	nv_parent(chan)->context_attach = nv84_fifo_context_attach;
186	nv_parent(chan)->context_detach = nv84_fifo_context_detach;
187	nv_parent(chan)->object_attach = nv84_fifo_object_attach;
188	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
189
	/* DMA mode: PUT (0x08/0x0c) and GET (0x10/0x14) both start at
	 * the caller-supplied pushbuffer offset. */
190	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
191	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
192	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
193	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
194	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
195	nv_wo32(base->ramfc, 0x44, 0x01003fff);
196	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
197	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
198	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
199	nv_wo32(base->ramfc, 0x78, 0x00000000);
200	nv_wo32(base->ramfc, 0x7c, 0x30000001);
201	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
202				   (4 << 24) /* SEARCH_FULL */ |
203				   (chan->ramht->base.node->offset >> 4));
204	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
205	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
206	bar->flush(bar);
207	return 0;
208}
209
/* Constructor for an indirect-buffer (NV84_CHANNEL_IND) channel: same
 * shape as the DMA ctor, but RAMFC points PFIFO at the IB ring buffer
 * described by args->ioffset/ilength instead of raw get/put offsets. */
210static int
211nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
212			struct nouveau_object *engine,
213			struct nouveau_oclass *oclass, void *data, u32 size,
214			struct nouveau_object **pobject)
215{
216	struct nouveau_bar *bar = nouveau_bar(parent);
217	struct nv50_fifo_base *base = (void *)parent;
218	struct nv50_fifo_chan *chan;
219	struct nv50_channel_ind_class *args = data;
220	u64 ioffset, ilength;
221	int ret;
222
223	if (size < sizeof(*args))
224		return -EINVAL;
225
226	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
227					  0x2000, args->pushbuf,
228					  (1 << NVDEV_ENGINE_DMAOBJ) |
229					  (1 << NVDEV_ENGINE_SW) |
230					  (1 << NVDEV_ENGINE_GR) |
231					  (1 << NVDEV_ENGINE_MPEG) |
232					  (1 << NVDEV_ENGINE_ME) |
233					  (1 << NVDEV_ENGINE_VP) |
234					  (1 << NVDEV_ENGINE_CRYPT) |
235					  (1 << NVDEV_ENGINE_BSP) |
236					  (1 << NVDEV_ENGINE_PPP) |
237					  (1 << NVDEV_ENGINE_COPY0) |
238					  (1 << NVDEV_ENGINE_UNK1C1), &chan);
239	*pobject = nv_object(chan);
240	if (ret)
241		return ret;
242
243	ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
244	if (ret)
245		return ret;
246
247	nv_parent(chan)->context_attach = nv84_fifo_context_attach;
248	nv_parent(chan)->context_detach = nv84_fifo_context_detach;
249	nv_parent(chan)->object_attach = nv84_fifo_object_attach;
250	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
251
	/* IB length is stored as log2 of the entry count (8 bytes/entry). */
252	ioffset = args->ioffset;
253	ilength = log2i(args->ilength / 8);
254
255	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
256	nv_wo32(base->ramfc, 0x44, 0x01003fff);
257	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
258	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
259	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
260	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
261	nv_wo32(base->ramfc, 0x78, 0x00000000);
262	nv_wo32(base->ramfc, 0x7c, 0x30000001);
263	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
264				   (4 << 24) /* SEARCH_FULL */ |
265				   (chan->ramht->base.node->offset >> 4));
266	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
267	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
268	bar->flush(bar);
269	return 0;
270}
271
/* Bring a channel online: publish its RAMFC address in PFIFO's
 * per-channel table (0x002600 + chid*4, valid bit 31 set) and rebuild
 * the playlist so the scheduler picks it up. */
272static int
273nv84_fifo_chan_init(struct nouveau_object *object)
274{
275	struct nv50_fifo_priv *priv = (void *)object->engine;
276	struct nv50_fifo_base *base = (void *)object->parent;
277	struct nv50_fifo_chan *chan = (void *)object;
278	struct nouveau_gpuobj *ramfc = base->ramfc;
279	u32 chid = chan->base.chid;
280	int ret;
281
282	ret = nouveau_fifo_channel_init(&chan->base);
283	if (ret)
284		return ret;
285
286	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
287	nv50_fifo_playlist_update(priv);
288	return 0;
289}
290
/* Object-function tables for the two channel flavours (DMA and IB);
 * they differ only in the constructor used. */
291static struct nouveau_ofuncs
292nv84_fifo_ofuncs_dma = {
293	.ctor = nv84_fifo_chan_ctor_dma,
294	.dtor = nv50_fifo_chan_dtor,
295	.init = nv84_fifo_chan_init,
296	.fini = nv50_fifo_chan_fini,
297	.rd32 = _nouveau_fifo_channel_rd32,
298	.wr32 = _nouveau_fifo_channel_wr32,
299};
300
301static struct nouveau_ofuncs
302nv84_fifo_ofuncs_ind = {
303	.ctor = nv84_fifo_chan_ctor_ind,
304	.dtor = nv50_fifo_chan_dtor,
305	.init = nv84_fifo_chan_init,
306	.fini = nv50_fifo_chan_fini,
307	.rd32 = _nouveau_fifo_channel_rd32,
308	.wr32 = _nouveau_fifo_channel_wr32,
309};
310
/* Channel classes this engine exposes to clients. */
311static struct nouveau_oclass
312nv84_fifo_sclass[] = {
313	{ NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma },
314	{ NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind },
315	{}
316};
317
318/*******************************************************************************
319 * FIFO context - basically just the instmem reserved for the channel
320 ******************************************************************************/
321
/* Allocate the per-channel instmem block and its sub-objects: engine
 * context table (eng), page directory (pgd), a reference to the
 * client's VM, CACHE, and RAMFC.  All are freed by the shared
 * nv50_fifo_context_dtor. */
322static int
323nv84_fifo_context_ctor(struct nouveau_object *parent,
324		       struct nouveau_object *engine,
325		       struct nouveau_oclass *oclass, void *data, u32 size,
326		       struct nouveau_object **pobject)
327{
328	struct nv50_fifo_base *base;
329	int ret;
330
331	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
332					  0x1000, NVOBJ_FLAG_HEAP, &base);
333	*pobject = nv_object(base);
334	if (ret)
335		return ret;
336
337	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0,
338				 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
339	if (ret)
340		return ret;
341
342	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0,
343				 0, &base->pgd);
344	if (ret)
345		return ret;
346
347	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
348	if (ret)
349		return ret;
350
351	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400,
352				 NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
353	if (ret)
354		return ret;
355
356	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100,
357				 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
358	if (ret)
359		return ret;
360
361	return 0;
362}
363
/* FIFO context class: nv84-specific ctor, shared nv50 dtor, generic
 * init/fini/rd32/wr32 from the core. */
364static struct nouveau_oclass
365nv84_fifo_cclass = {
366	.handle = NV_ENGCTX(FIFO, 0x84),
367	.ofuncs = &(struct nouveau_ofuncs) {
368		.ctor = nv84_fifo_context_ctor,
369		.dtor = nv50_fifo_context_dtor,
370		.init = _nouveau_fifo_context_init,
371		.fini = _nouveau_fifo_context_fini,
372		.rd32 = _nouveau_fifo_context_rd32,
373		.wr32 = _nouveau_fifo_context_wr32,
374	},
375};
376
377/*******************************************************************************
378 * PFIFO engine
379 ******************************************************************************/
380
/* Create the nv84 PFIFO engine: channels 1..127, two playlist buffers
 * for double-buffered runlist updates, and the nv04 interrupt handler
 * (reused unchanged on this generation). */
381static int
382nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
383	       struct nouveau_oclass *oclass, void *data, u32 size,
384	       struct nouveau_object **pobject)
385{
386	struct nv50_fifo_priv *priv;
387	int ret;
388
389	ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
390	*pobject = nv_object(priv);
391	if (ret)
392		return ret;
393
394	ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
395				 &priv->playlist[0]);
396	if (ret)
397		return ret;
398
399	ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
400				 &priv->playlist[1]);
401	if (ret)
402		return ret;
403
404	nv_subdev(priv)->unit = 0x00000100;
405	nv_subdev(priv)->intr = nv04_fifo_intr;
406	nv_engine(priv)->cclass = &nv84_fifo_cclass;
407	nv_engine(priv)->sclass = nv84_fifo_sclass;
408	return 0;
409}
410
/* Engine class for the NV84-generation PFIFO; dtor/init are shared
 * with the nv50 implementation. */
411struct nouveau_oclass
412nv84_fifo_oclass = {
413	.handle = NV_ENGINE(FIFO, 0x84),
414	.ofuncs = &(struct nouveau_ofuncs) {
415		.ctor = nv84_fifo_ctor,
416		.dtor = nv50_fifo_dtor,
417		.init = nv50_fifo_init,
418		.fini = _nouveau_fifo_fini,
419	},
420};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
new file mode 100644
index 000000000000..6f21be600557
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -0,0 +1,647 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/client.h>
26#include <core/handle.h>
27#include <core/namedb.h>
28#include <core/gpuobj.h>
29#include <core/engctx.h>
30#include <core/class.h>
31#include <core/math.h>
32#include <core/enum.h>
33
34#include <subdev/timer.h>
35#include <subdev/bar.h>
36#include <subdev/vm.h>
37
38#include <engine/dmaobj.h>
39#include <engine/fifo.h>
40
/* Fermi PFIFO private state: double-buffered playlist, the USER area
 * (per-channel control pages mapped through BAR), and the number of
 * PSUBFIFO units detected at init time (spoon_nr). */
41struct nvc0_fifo_priv {
42	struct nouveau_fifo base;
43	struct nouveau_gpuobj *playlist[2];
44	int cur_playlist;
45	struct {
46		struct nouveau_gpuobj *mem;
47		struct nouveau_vma bar;
48	} user;
49	int spoon_nr;
50};
51
/* Per-channel instmem: page directory and the VM it backs. */
52struct nvc0_fifo_base {
53	struct nouveau_fifo_base base;
54	struct nouveau_gpuobj *pgd;
55	struct nouveau_vm *vm;
56};
57
/* No nvc0-specific per-channel state beyond the common base. */
58struct nvc0_fifo_chan {
59	struct nouveau_fifo_chan base;
60};
61
62/*******************************************************************************
63 * FIFO channel objects
64 ******************************************************************************/
65
/* Rebuild the runlist into the currently-inactive playlist buffer,
 * including every channel whose enable bit (0x003004[chid] bit 0) is
 * set, then point PFIFO at the new list and wait for it to latch. */
66static void
67nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
68{
69	struct nouveau_bar *bar = nouveau_bar(priv);
70	struct nouveau_gpuobj *cur;
71	int i, p;
72
73	cur = priv->playlist[priv->cur_playlist];
74	priv->cur_playlist = !priv->cur_playlist;
75
	/* Each playlist entry is 8 bytes: channel id + flags word. */
76	for (i = 0, p = 0; i < 128; i++) {
77		if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1))
78			continue;
79		nv_wo32(cur, p + 0, i);
80		nv_wo32(cur, p + 4, 0x00000004);
81		p += 8;
82	}
83	bar->flush(bar);
84
85	nv_wr32(priv, 0x002270, cur->addr >> 12);
86	nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
87	if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
88		nv_error(priv, "playlist update failed\n");
89}
90
/* Attach an engine context on Fermi: map the engctx into the channel's
 * VM on first use, then write its VM address into the channel instmem
 * slot for that engine and flush BAR. */
91static int
92nvc0_fifo_context_attach(struct nouveau_object *parent,
93			 struct nouveau_object *object)
94{
95	struct nouveau_bar *bar = nouveau_bar(parent);
96	struct nvc0_fifo_base *base = (void *)parent->parent;
97	struct nouveau_engctx *ectx = (void *)object;
98	u32 addr;
99	int ret;
100
101	switch (nv_engidx(object->engine)) {
102	case NVDEV_ENGINE_SW   : return 0;
103	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
104	case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
105	case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
106	default:
107		return -EINVAL;
108	}
109
	/* Lazily map the engctx into this channel's VM; the mapping is
	 * reused on subsequent attaches. */
110	if (!ectx->vma.node) {
111		ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
112					    NV_MEM_ACCESS_RW, &ectx->vma);
113		if (ret)
114			return ret;
115
116		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
117	}
118
119	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
120	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
121	bar->flush(bar);
122	return 0;
123}
124
/* Detach an engine context: clear the channel instmem slot, then kick
 * the channel via 0x002634 and wait for the write-back.  Only fail
 * (-EBUSY) on timeout when suspending. */
125static int
126nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
127			 struct nouveau_object *object)
128{
129	struct nouveau_bar *bar = nouveau_bar(parent);
130	struct nvc0_fifo_priv *priv = (void *)parent->engine;
131	struct nvc0_fifo_base *base = (void *)parent->parent;
132	struct nvc0_fifo_chan *chan = (void *)parent;
133	u32 addr;
134
135	switch (nv_engidx(object->engine)) {
136	case NVDEV_ENGINE_SW   : return 0;
137	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
138	case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
139	case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
140	default:
141		return -EINVAL;
142	}
143
144	nv_wo32(base, addr + 0x00, 0x00000000);
145	nv_wo32(base, addr + 0x04, 0x00000000);
146	bar->flush(bar);
147
148	nv_wr32(priv, 0x002634, chan->base.chid);
149	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
150		nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
151		if (suspend)
152			return -EBUSY;
153	}
154
155	return 0;
156}
157
/* Constructor for an NVC0_CHANNEL_IND channel: create the channel
 * (its user window lives in priv->user at chid*0x1000), zero that
 * USER page, and program the IB ring plus fixed defaults directly
 * into the channel's instmem. */
158static int
159nvc0_fifo_chan_ctor(struct nouveau_object *parent,
160		    struct nouveau_object *engine,
161		    struct nouveau_oclass *oclass, void *data, u32 size,
162		    struct nouveau_object **pobject)
163{
164	struct nouveau_bar *bar = nouveau_bar(parent);
165	struct nvc0_fifo_priv *priv = (void *)engine;
166	struct nvc0_fifo_base *base = (void *)parent;
167	struct nvc0_fifo_chan *chan;
168	struct nv50_channel_ind_class *args = data;
169	u64 usermem, ioffset, ilength;
170	int ret, i;
171
172	if (size < sizeof(*args))
173		return -EINVAL;
174
175	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
176					  priv->user.bar.offset, 0x1000,
177					  args->pushbuf,
178					  (1 << NVDEV_ENGINE_SW) |
179					  (1 << NVDEV_ENGINE_GR) |
180					  (1 << NVDEV_ENGINE_COPY0) |
181					  (1 << NVDEV_ENGINE_COPY1), &chan);
182	*pobject = nv_object(chan);
183	if (ret)
184		return ret;
185
186	nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
187	nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
188
	/* ilength is stored as log2 of the IB entry count (8 bytes each). */
189	usermem = chan->base.chid * 0x1000;
190	ioffset = args->ioffset;
191	ilength = log2i(args->ilength / 8);
192
	/* Clear this channel's page of the shared USER area. */
193	for (i = 0; i < 0x1000; i += 4)
194		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
195
196	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
197	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
198	nv_wo32(base, 0x10, 0x0000face);
199	nv_wo32(base, 0x30, 0xfffff902);
200	nv_wo32(base, 0x48, lower_32_bits(ioffset));
201	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
202	nv_wo32(base, 0x54, 0x00000002);
203	nv_wo32(base, 0x84, 0x20400000);
204	nv_wo32(base, 0x94, 0x30000001);
205	nv_wo32(base, 0x9c, 0x00000100);
206	nv_wo32(base, 0xa4, 0x1f1f1f1f);
207	nv_wo32(base, 0xa8, 0x1f1f1f1f);
208	nv_wo32(base, 0xac, 0x0000001f);
209	nv_wo32(base, 0xb8, 0xf8000000);
210	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
211	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
212	bar->flush(bar);
213	return 0;
214}
215
/* Enable the channel: publish its instmem address and enable bit in
 * the per-channel registers (0x003000/0x003004 + chid*8) and rebuild
 * the playlist so it gets scheduled. */
216static int
217nvc0_fifo_chan_init(struct nouveau_object *object)
218{
219	struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
220	struct nvc0_fifo_priv *priv = (void *)object->engine;
221	struct nvc0_fifo_chan *chan = (void *)object;
222	u32 chid = chan->base.chid;
223	int ret;
224
225	ret = nouveau_fifo_channel_init(&chan->base);
226	if (ret)
227		return ret;
228
229	nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
230	nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
231	nvc0_fifo_playlist_update(priv);
232	return 0;
233}
234
/* Take the channel offline: clear its enable bit, drop it from the
 * playlist, then clear its instmem pointer before the generic fini. */
235static int
236nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
237{
238	struct nvc0_fifo_priv *priv = (void *)object->engine;
239	struct nvc0_fifo_chan *chan = (void *)object;
240	u32 chid = chan->base.chid;
241
242	nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
243	nvc0_fifo_playlist_update(priv);
244	nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
245
246	return nouveau_fifo_channel_fini(&chan->base, suspend);
247}
248
/* Channel object functions; Fermi exposes only the IB channel class. */
249static struct nouveau_ofuncs
250nvc0_fifo_ofuncs = {
251	.ctor = nvc0_fifo_chan_ctor,
252	.dtor = _nouveau_fifo_channel_dtor,
253	.init = nvc0_fifo_chan_init,
254	.fini = nvc0_fifo_chan_fini,
255	.rd32 = _nouveau_fifo_channel_rd32,
256	.wr32 = _nouveau_fifo_channel_wr32,
257};
258
259static struct nouveau_oclass
260nvc0_fifo_sclass[] = {
261	{ NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs },
262	{}
263};
264
265/*******************************************************************************
266 * FIFO context - instmem heap and vm setup
267 ******************************************************************************/
268
/* Allocate the channel's instmem block and a 64KiB page directory,
 * write the PD address/limit into the channel header, and bind the
 * client's VM to that PD. */
269static int
270nvc0_fifo_context_ctor(struct nouveau_object *parent,
271		       struct nouveau_object *engine,
272		       struct nouveau_oclass *oclass, void *data, u32 size,
273		       struct nouveau_object **pobject)
274{
275	struct nvc0_fifo_base *base;
276	int ret;
277
278	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
279					  0x1000, NVOBJ_FLAG_ZERO_ALLOC |
280					  NVOBJ_FLAG_HEAP, &base);
281	*pobject = nv_object(base);
282	if (ret)
283		return ret;
284
285	ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
286	if (ret)
287		return ret;
288
289	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
290	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
291	nv_wo32(base, 0x0208, 0xffffffff);
292	nv_wo32(base, 0x020c, 0x000000ff);
293
294	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
295	if (ret)
296		return ret;
297
298	return 0;
299}
300
/* Tear down a channel context in reverse ctor order: drop the VM
 * binding, release the page directory, then the instmem block. */
301static void
302nvc0_fifo_context_dtor(struct nouveau_object *object)
303{
304	struct nvc0_fifo_base *base = (void *)object;
305	nouveau_vm_ref(NULL, &base->vm, base->pgd);
306	nouveau_gpuobj_ref(NULL, &base->pgd);
307	nouveau_fifo_context_destroy(&base->base);
308}
309
/* FIFO context class for Fermi. */
310static struct nouveau_oclass
311nvc0_fifo_cclass = {
312	.handle = NV_ENGCTX(FIFO, 0xc0),
313	.ofuncs = &(struct nouveau_ofuncs) {
314		.ctor = nvc0_fifo_context_ctor,
315		.dtor = nvc0_fifo_context_dtor,
316		.init = _nouveau_fifo_context_init,
317		.fini = _nouveau_fifo_context_fini,
318		.rd32 = _nouveau_fifo_context_rd32,
319		.wr32 = _nouveau_fifo_context_wr32,
	},
};
322
323/*******************************************************************************
324 * PFIFO engine
325 ******************************************************************************/
326
/* Decode tables for MMU-fault and subfifo interrupt reporting; values
 * are raw hardware codes printed by the fault/interrupt handlers. */
327static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
328	{ 0x00, "PGRAPH" },
329	{ 0x03, "PEEPHOLE" },
330	{ 0x04, "BAR1" },
331	{ 0x05, "BAR3" },
332	{ 0x07, "PFIFO" },
333	{ 0x10, "PBSP" },
334	{ 0x11, "PPPP" },
335	{ 0x13, "PCOUNTER" },
336	{ 0x14, "PVP" },
337	{ 0x15, "PCOPY0" },
338	{ 0x16, "PCOPY1" },
339	{ 0x17, "PDAEMON" },
340	{}
341};
342
343static const struct nouveau_enum nvc0_fifo_fault_reason[] = {
344	{ 0x00, "PT_NOT_PRESENT" },
345	{ 0x01, "PT_TOO_SHORT" },
346	{ 0x02, "PAGE_NOT_PRESENT" },
347	{ 0x03, "VM_LIMIT_EXCEEDED" },
348	{ 0x04, "NO_CHANNEL" },
349	{ 0x05, "PAGE_SYSTEM_ONLY" },
350	{ 0x06, "PAGE_READ_ONLY" },
351	{ 0x0a, "COMPRESSED_SYSRAM" },
352	{ 0x0c, "INVALID_STORAGE_TYPE" },
353	{}
354};
355
356static const struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
357	{ 0x01, "PCOPY0" },
358	{ 0x02, "PCOPY1" },
359	{ 0x04, "DISPATCH" },
360	{ 0x05, "CTXCTL" },
361	{ 0x06, "PFIFO" },
362	{ 0x07, "BAR_READ" },
363	{ 0x08, "BAR_WRITE" },
364	{ 0x0b, "PVP" },
365	{ 0x0c, "PPPP" },
366	{ 0x0d, "PBSP" },
367	{ 0x11, "PCOUNTER" },
368	{ 0x12, "PDAEMON" },
369	{ 0x14, "CCACHE" },
370	{ 0x15, "CCACHE_POST" },
371	{}
372};
373
374static const struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
375	{ 0x01, "TEX" },
376	{ 0x0c, "ESETUP" },
377	{ 0x0e, "CTXCTL" },
378	{ 0x0f, "PROP" },
379	{}
380};
381
382static const struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
383/*	{ 0x00008000, "" }	seen with null ib push */
384	{ 0x00200000, "ILLEGAL_MTHD" },
385	{ 0x00800000, "EMPTY_SUBC" },
386	{}
387};
388
/* Decode and log an MMU fault reported for `unit`: read the fault
 * instance/address/status registers, poke the unit-specific recovery
 * register for PEEPHOLE/BAR1/BAR3, and print a human-readable report
 * using the enum tables above. */
389static void
390nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
391{
392	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
393	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
394	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
395	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
396	u32 client = (stat & 0x00001f00) >> 8;
397
	/* NOTE(review): these mask-with-zero writes look like read-
	 * modify-write "touch" operations to unstick the unit — confirm
	 * against hardware documentation. */
398	switch (unit) {
399	case 3: /* PEEPHOLE */
400		nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
401		break;
402	case 4: /* BAR1 */
403		nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
404		break;
405	case 5: /* BAR3 */
406		nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
407		break;
408	default:
409		break;
410	}
411
412	nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
413		 "write" : "read", (u64)vahi << 32 | valo);
414	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
415	printk("] from ");
416	nouveau_enum_print(nvc0_fifo_fault_unit, unit);
417	if (stat & 0x00000040) {
418		printk("/");
419		nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
420	} else {
421		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
422		nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
423	}
424	printk(" on channel 0x%010llx\n", (u64)inst << 12);
425}
426
/* Try to handle a software method for channel `chid`: look up the
 * channel's software object (class 0x906e) and forward the method
 * call to it.  Returns 0 if handled, -EINVAL otherwise.  Runs under
 * the fifo lock with interrupts disabled (called from the ISR). */
427static int
428nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
429{
430	struct nvc0_fifo_chan *chan = NULL;
431	struct nouveau_handle *bind;
432	unsigned long flags;
433	int ret = -EINVAL;
434
435	spin_lock_irqsave(&priv->base.lock, flags);
436	if (likely(chid >= priv->base.min && chid <= priv->base.max))
437		chan = (void *)priv->base.channel[chid];
438	if (unlikely(!chan))
439		goto out;
440
441	bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
442	if (likely(bind)) {
443		if (!mthd || !nv_call(bind->object, mthd, data))
444			ret = 0;
445		nouveau_namedb_put(bind);
446	}
447
448out:
449	spin_unlock_irqrestore(&priv->base.lock, flags);
450	return ret;
451}
452
/* Service an interrupt from PSUBFIFO `unit`: give software methods a
 * chance to handle ILLEGAL_MTHD/EMPTY_SUBC conditions, log anything
 * left over, then acknowledge the interrupt. */
453static void
454nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
455{
456	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
457	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
458	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
459	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
460	u32 subc = (addr & 0x00070000) >> 16;
461	u32 mthd = (addr & 0x00003ffc);
462	u32 show = stat;
463
	/* ILLEGAL_MTHD on method 0x0054: forwarded as swmthd 0x0500. */
464	if (stat & 0x00200000) {
465		if (mthd == 0x0054) {
466			if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
467				show &= ~0x00200000;
468		}
469	}
470
	/* EMPTY_SUBC: the method may be destined for software. */
471	if (stat & 0x00800000) {
472		if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
473			show &= ~0x00800000;
474	}
475
476	if (show) {
477		nv_error(priv, "SUBFIFO%d:", unit);
478		nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
479		printk("\n");
480		nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
481			       "data 0x%08x\n",
482			 unit, chid, subc, mthd, data);
483	}
484
485	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
486	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
487}
488
/* Top-level PFIFO interrupt handler: dispatch VM faults (bit 28) and
 * subfifo interrupts (bit 29) to their per-unit handlers; any status
 * bit we don't understand is logged, acked, and then masked off so it
 * cannot storm. */
489static void
490nvc0_fifo_intr(struct nouveau_subdev *subdev)
491{
492	struct nvc0_fifo_priv *priv = (void *)subdev;
493	u32 mask = nv_rd32(priv, 0x002140);
494	u32 stat = nv_rd32(priv, 0x002100) & mask;
495
496	if (stat & 0x00000100) {
497		nv_info(priv, "unknown status 0x00000100\n");
498		nv_wr32(priv, 0x002100, 0x00000100);
499		stat &= ~0x00000100;
500	}
501
	/* Bit 28: one pending MMU fault per set bit in 0x00259c. */
502	if (stat & 0x10000000) {
503		u32 units = nv_rd32(priv, 0x00259c);
504		u32 u = units;
505
506		while (u) {
507			int i = ffs(u) - 1;
508			nvc0_fifo_isr_vm_fault(priv, i);
509			u &= ~(1 << i);
510		}
511
512		nv_wr32(priv, 0x00259c, units);
513		stat &= ~0x10000000;
514	}
515
	/* Bit 29: one pending subfifo interrupt per set bit in 0x0025a0. */
516	if (stat & 0x20000000) {
517		u32 units = nv_rd32(priv, 0x0025a0);
518		u32 u = units;
519
520		while (u) {
521			int i = ffs(u) - 1;
522			nvc0_fifo_isr_subfifo_intr(priv, i);
523			u &= ~(1 << i);
524		}
525
526		nv_wr32(priv, 0x0025a0, units);
527		stat &= ~0x20000000;
528	}
529
530	if (stat & 0x40000000) {
531		nv_warn(priv, "unknown status 0x40000000\n");
532		nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
533		stat &= ~0x40000000;
534	}
535
	/* Anything left is fatal-ish: ack it and disable all PFIFO
	 * interrupts to prevent an interrupt storm. */
536	if (stat) {
537		nv_fatal(priv, "unhandled status 0x%08x\n", stat);
538		nv_wr32(priv, 0x002100, stat);
539		nv_wr32(priv, 0x002140, 0);
540	}
541}
542
/* Create the Fermi PFIFO engine: channels 0..127, two 4KiB playlist
 * buffers, the 128-page USER area plus its BAR mapping, and the nvc0
 * interrupt handler. */
543static int
544nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
545	       struct nouveau_oclass *oclass, void *data, u32 size,
546	       struct nouveau_object **pobject)
547{
548	struct nvc0_fifo_priv *priv;
549	int ret;
550
551	ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv);
552	*pobject = nv_object(priv);
553	if (ret)
554		return ret;
555
556	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
557				 &priv->playlist[0]);
558	if (ret)
559		return ret;
560
561	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
562				 &priv->playlist[1]);
563	if (ret)
564		return ret;
565
	/* One 4KiB USER control page per channel, mapped via BAR. */
566	ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0,
567				 &priv->user.mem);
568	if (ret)
569		return ret;
570
571	ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
572				 &priv->user.bar);
573	if (ret)
574		return ret;
575
576	nv_subdev(priv)->unit = 0x00000100;
577	nv_subdev(priv)->intr = nvc0_fifo_intr;
578	nv_engine(priv)->cclass = &nvc0_fifo_cclass;
579	nv_engine(priv)->sclass = nvc0_fifo_sclass;
580	return 0;
581}
582
/* Release ctor-allocated resources in reverse order: BAR mapping,
 * USER area, both playlist buffers, then the engine base. */
583static void
584nvc0_fifo_dtor(struct nouveau_object *object)
585{
586	struct nvc0_fifo_priv *priv = (void *)object;
587
588	nouveau_gpuobj_unmap(&priv->user.bar);
589	nouveau_gpuobj_ref(NULL, &priv->user.mem);
590	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
591	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
592
593	nouveau_fifo_destroy(&priv->base);
594}
595
/* Hardware init: enable all PSUBFIFO units, count them (spoon_nr),
 * route engines to subfifos, reset per-subfifo interrupt state, point
 * PFIFO at the USER area, and unmask interrupts. */
596static int
597nvc0_fifo_init(struct nouveau_object *object)
598{
599	struct nvc0_fifo_priv *priv = (void *)object;
600	int ret, i;
601
602	ret = nouveau_fifo_init(&priv->base);
603	if (ret)
604		return ret;
605
606	nv_wr32(priv, 0x000204, 0xffffffff);
607	nv_wr32(priv, 0x002204, 0xffffffff);
608
	/* 0x002204 reads back the subfifos actually present. */
609	priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
610	nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
611
612	/* assign engines to subfifos */
613	if (priv->spoon_nr >= 3) {
614		nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
615		nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
616		nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */
617		nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */
618		nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
619		nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
620	}
621
622	/* PSUBFIFO[n] */
623	for (i = 0; i < priv->spoon_nr; i++) {
624		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
625		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
626		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
627	}
628
629	nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
630	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
631
632	nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
633	nv_wr32(priv, 0x002100, 0xffffffff);
634	nv_wr32(priv, 0x002140, 0xbfffffff);
635	return 0;
636}
637
/* Engine class for the Fermi (NVC0) PFIFO. */
638struct nouveau_oclass
639nvc0_fifo_oclass = {
640	.handle = NV_ENGINE(FIFO, 0xc0),
641	.ofuncs = &(struct nouveau_ofuncs) {
642		.ctor = nvc0_fifo_ctor,
643		.dtor = nvc0_fifo_dtor,
644		.init = nvc0_fifo_init,
645		.fini = _nouveau_fifo_fini,
646	},
647};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
new file mode 100644
index 000000000000..36e81b6fafbc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -0,0 +1,628 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/client.h>
26#include <core/handle.h>
27#include <core/namedb.h>
28#include <core/gpuobj.h>
29#include <core/engctx.h>
30#include <core/class.h>
31#include <core/math.h>
32#include <core/enum.h>
33
34#include <subdev/timer.h>
35#include <subdev/bar.h>
36#include <subdev/vm.h>
37
38#include <engine/dmaobj.h>
39#include <engine/fifo.h>
40
/* Table mapping NVE0 playlist/runlist index to its device subdev, plus the
 * bitmask of engines a channel on that runlist may attach: the engine's own
 * bit is always included, GR additionally couples the SW engine.  The `_`
 * macro is scoped to this table and undefined immediately after. */
41#define _(a,b) { (a), ((1 << (a)) | (b)) }
42static const struct {
43 int subdev;
44 u32 mask;
45} fifo_engine[] = {
46 _(NVDEV_ENGINE_GR , (1 << NVDEV_ENGINE_SW)),
47 _(NVDEV_ENGINE_VP , 0),
48 _(NVDEV_ENGINE_PPP , 0),
49 _(NVDEV_ENGINE_BSP , 0),
50 _(NVDEV_ENGINE_COPY0 , 0),
51 _(NVDEV_ENGINE_COPY1 , 0),
52 _(NVDEV_ENGINE_VENC , 0),
53};
54#undef _
55#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
56
/* Per-runlist state: double-buffered playlist objects, lazily allocated
 * by nve0_fifo_playlist_update(). */
57struct nve0_fifo_engn {
58 struct nouveau_gpuobj *playlist[2];
59 int cur_playlist;
60};
61
/* PFIFO engine private data: per-runlist state plus the "usermem" object
 * (per-channel user control pages) and its BAR mapping. */
62struct nve0_fifo_priv {
63 struct nouveau_fifo base;
64 struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
65 struct {
66 struct nouveau_gpuobj *mem;
67 struct nouveau_vma bar;
68 } user;
69 int spoon_nr;
70};
71
/* Per-channel-group context: instance block with its page directory and
 * the VM the channel's objects are mapped into. */
72struct nve0_fifo_base {
73 struct nouveau_fifo_base base;
74 struct nouveau_gpuobj *pgd;
75 struct nouveau_vm *vm;
76};
77
/* Channel state; `engine` is the index into fifo_engine[] (i.e. which
 * runlist the channel was created on). */
78struct nve0_fifo_chan {
79 struct nouveau_fifo_chan base;
80 u32 engine;
81};
82
83/*******************************************************************************
84 * FIFO channel objects
85 ******************************************************************************/
86
/* Rebuild and submit the playlist (runlist) for one engine.
 *
 * Playlists are double-buffered per engine; the buffer is allocated lazily
 * on first use.  Every channel whose control word at 0x800004+chid*8 is
 * enabled (bit 0) and bound to this engine (bits 16:20) is appended as an
 * 8-byte entry, then the list address/count is handed to the hardware and
 * we wait for the update to be consumed.  Errors are logged, not returned.
 */
87static void
88nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
89{
90 struct nouveau_bar *bar = nouveau_bar(priv);
91 struct nve0_fifo_engn *engn = &priv->engine[engine];
92 struct nouveau_gpuobj *cur;
93 u32 match = (engine << 16) | 0x00000001;
94 int i, p;
95
96 cur = engn->playlist[engn->cur_playlist];
97 if (unlikely(cur == NULL)) {
98 int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL,
99 0x8000, 0x1000, 0, &cur);
100 if (ret) {
101 nv_error(priv, "playlist alloc failed\n");
102 return;
103 }
104
105 engn->playlist[engn->cur_playlist] = cur;
106 }
107
/* Flip to the other buffer for the *next* update; `cur` (just filled in
 * below) is the one submitted this time. */
108 engn->cur_playlist = !engn->cur_playlist;
109
110 for (i = 0, p = 0; i < priv->base.max; i++) {
111 u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
112 if (ctrl != match)
113 continue;
114 nv_wo32(cur, p + 0, i);
115 nv_wo32(cur, p + 4, 0x00000000);
116 p += 8;
117 }
/* Ensure the playlist writes are visible to the GPU before kicking it. */
118 bar->flush(bar);
119
120 nv_wr32(priv, 0x002270, cur->addr >> 12);
121 nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
122 if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
123 nv_error(priv, "playlist %d update timeout\n", engine);
124}
125
/* Attach an engine context to a channel: map the engctx into the channel's
 * VM (once) and write its VM address into the channel instance block.
 * SW needs no hw context; only GR/COPY0/COPY1 are supported here, all of
 * which use the context pointer slot at instance offset 0x210.
 */
126static int
127nve0_fifo_context_attach(struct nouveau_object *parent,
128 struct nouveau_object *object)
129{
130 struct nouveau_bar *bar = nouveau_bar(parent);
131 struct nve0_fifo_base *base = (void *)parent->parent;
132 struct nouveau_engctx *ectx = (void *)object;
133 u32 addr;
134 int ret;
135
136 switch (nv_engidx(object->engine)) {
137 case NVDEV_ENGINE_SW : return 0;
138 case NVDEV_ENGINE_GR :
139 case NVDEV_ENGINE_COPY0:
140 case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
141 default:
142 return -EINVAL;
143 }
144
145 if (!ectx->vma.node) {
146 ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
147 NV_MEM_ACCESS_RW, &ectx->vma);
148 if (ret)
149 return ret;
150
151 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
152 }
153
/* Low word carries flags in the bottom bits (| 4 — presumably "valid";
 * exact hw meaning not visible here). */
154 nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
155 nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
156 bar->flush(bar);
157 return 0;
158}
159
/* Detach an engine context from a channel: clear the context pointer in the
 * instance block, then "kick" the channel via 0x002634 and wait for the
 * hardware to acknowledge (register reads back as the chid).  A kick
 * timeout is fatal only on suspend (-EBUSY); otherwise we carry on.
 */
160static int
161nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
162 struct nouveau_object *object)
163{
164 struct nouveau_bar *bar = nouveau_bar(parent);
165 struct nve0_fifo_priv *priv = (void *)parent->engine;
166 struct nve0_fifo_base *base = (void *)parent->parent;
167 struct nve0_fifo_chan *chan = (void *)parent;
168 u32 addr;
169
170 switch (nv_engidx(object->engine)) {
171 case NVDEV_ENGINE_SW : return 0;
172 case NVDEV_ENGINE_GR :
173 case NVDEV_ENGINE_COPY0:
174 case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
175 default:
176 return -EINVAL;
177 }
178
179 nv_wo32(base, addr + 0x00, 0x00000000);
180 nv_wo32(base, addr + 0x04, 0x00000000);
181 bar->flush(bar);
182
183 nv_wr32(priv, 0x002634, chan->base.chid);
184 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
185 nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
186 if (suspend)
187 return -EBUSY;
188 }
189
190 return 0;
191}
192
/* Channel constructor (NVE0_CHANNEL_IND class).
 *
 * Selects the first engine requested in args->engine that is actually
 * present on this device (narrowing args->engine to that single bit),
 * creates the generic fifo channel backed by the shared usermem window,
 * then initialises the channel's RAMFC-equivalent fields in its instance
 * block (pushbuf IB address/size, and a number of magic defaults whose
 * hw meaning is not documented here).
 */
193static int
194nve0_fifo_chan_ctor(struct nouveau_object *parent,
195 struct nouveau_object *engine,
196 struct nouveau_oclass *oclass, void *data, u32 size,
197 struct nouveau_object **pobject)
198{
199 struct nouveau_bar *bar = nouveau_bar(parent);
200 struct nve0_fifo_priv *priv = (void *)engine;
201 struct nve0_fifo_base *base = (void *)parent;
202 struct nve0_fifo_chan *chan;
203 struct nve0_channel_ind_class *args = data;
204 u64 usermem, ioffset, ilength;
205 int ret, i;
206
207 if (size < sizeof(*args))
208 return -EINVAL;
209
210 for (i = 0; i < FIFO_ENGINE_NR; i++) {
211 if (args->engine & (1 << i)) {
212 if (nouveau_engine(parent, fifo_engine[i].subdev)) {
213 args->engine = (1 << i);
214 break;
215 }
216 }
217 }
218
219 if (i == FIFO_ENGINE_NR)
220 return -ENODEV;
221
222 ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
223 priv->user.bar.offset, 0x200,
224 args->pushbuf,
225 fifo_engine[i].mask, &chan);
226 *pobject = nv_object(chan);
227 if (ret)
228 return ret;
229
230 nv_parent(chan)->context_attach = nve0_fifo_context_attach;
231 nv_parent(chan)->context_detach = nve0_fifo_context_detach;
232 chan->engine = i;
233
/* Each channel owns a 0x200-byte slice of the shared usermem object;
 * zero it before pointing the instance block at it. */
234 usermem = chan->base.chid * 0x200;
235 ioffset = args->ioffset;
236 ilength = log2i(args->ilength / 8);
237
238 for (i = 0; i < 0x200; i += 4)
239 nv_wo32(priv->user.mem, usermem + i, 0x00000000);
240
241 nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
242 nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
243 nv_wo32(base, 0x10, 0x0000face);
244 nv_wo32(base, 0x30, 0xfffff902);
245 nv_wo32(base, 0x48, lower_32_bits(ioffset))
246 nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
247 nv_wo32(base, 0x84, 0x20400000);
248 nv_wo32(base, 0x94, 0x30000001);
249 nv_wo32(base, 0x9c, 0x00000100);
250 nv_wo32(base, 0xac, 0x0000001f);
251 nv_wo32(base, 0xe8, chan->base.chid);
252 nv_wo32(base, 0xb8, 0xf8000000);
253 nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
254 nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
255 bar->flush(bar);
256 return 0;
257}
258
/* Channel init: bind the channel's runlist engine and instance address in
 * the per-channel control words, enable it, and (re)submit the playlist.
 * NOTE(review): bit 0x400 is set both before and after the playlist
 * update — matches the original commit; hw reason not visible here.
 */
259static int
260nve0_fifo_chan_init(struct nouveau_object *object)
261{
262 struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
263 struct nve0_fifo_priv *priv = (void *)object->engine;
264 struct nve0_fifo_chan *chan = (void *)object;
265 u32 chid = chan->base.chid;
266 int ret;
267
268 ret = nouveau_fifo_channel_init(&chan->base);
269 if (ret)
270 return ret;
271
272 nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
273 nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
274 nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
275 nve0_fifo_playlist_update(priv, chan->engine);
276 nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
277 return 0;
278}
279
/* Channel fini: mark the channel for removal (bit 0x800), rebuild the
 * playlist without it, then clear its instance pointer. */
280static int
281nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
282{
283 struct nve0_fifo_priv *priv = (void *)object->engine;
284 struct nve0_fifo_chan *chan = (void *)object;
285 u32 chid = chan->base.chid;
286
287 nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
288 nve0_fifo_playlist_update(priv, chan->engine);
289 nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
290
291 return nouveau_fifo_channel_fini(&chan->base, suspend);
292}
293
/* Object functions for the channel class; rd32/wr32 forward to the
 * channel's usermem window via the generic fifo helpers. */
294static struct nouveau_ofuncs
295nve0_fifo_ofuncs = {
296 .ctor = nve0_fifo_chan_ctor,
297 .dtor = _nouveau_fifo_channel_dtor,
298 .init = nve0_fifo_chan_init,
299 .fini = nve0_fifo_chan_fini,
300 .rd32 = _nouveau_fifo_channel_rd32,
301 .wr32 = _nouveau_fifo_channel_wr32,
302};
303
/* Classes user clients can instantiate on this engine. */
304static struct nouveau_oclass
305nve0_fifo_sclass[] = {
306 { NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs },
307 {}
308};
309
310/*******************************************************************************
311 * FIFO context - instmem heap and vm setup
312 ******************************************************************************/
313
/* FIFO context constructor: allocate the 4KiB instance block, a 64KiB page
 * directory, point the instance at the pgd with a 40-bit VM limit
 * (0xff_ffffffff), and reference the owning client's VM onto it. */
314static int
315nve0_fifo_context_ctor(struct nouveau_object *parent,
316 struct nouveau_object *engine,
317 struct nouveau_oclass *oclass, void *data, u32 size,
318 struct nouveau_object **pobject)
319{
320 struct nve0_fifo_base *base;
321 int ret;
322
323 ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
324 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
325 *pobject = nv_object(base);
326 if (ret)
327 return ret;
328
329 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
330 if (ret)
331 return ret;
332
333 nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
334 nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
335 nv_wo32(base, 0x0208, 0xffffffff);
336 nv_wo32(base, 0x020c, 0x000000ff);
337
338 ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
339 if (ret)
340 return ret;
341
342 return 0;
343}
344
/* FIFO context destructor: drop the VM reference (unbinding the pgd),
 * release the pgd object, then tear down the base context. */
345static void
346nve0_fifo_context_dtor(struct nouveau_object *object)
347{
348 struct nve0_fifo_base *base = (void *)object;
349 nouveau_vm_ref(NULL, &base->vm, base->pgd);
350 nouveau_gpuobj_ref(NULL, &base->pgd);
351 nouveau_fifo_context_destroy(&base->base);
352}
353
/* Context class descriptor; init/fini/rd32/wr32 use the generic fifo
 * context implementations. */
354static struct nouveau_oclass
355nve0_fifo_cclass = {
356 .handle = NV_ENGCTX(FIFO, 0xe0),
357 .ofuncs = &(struct nouveau_ofuncs) {
358 .ctor = nve0_fifo_context_ctor,
359 .dtor = nve0_fifo_context_dtor,
360 .init = _nouveau_fifo_context_init,
361 .fini = _nouveau_fifo_context_fini,
362 .rd32 = _nouveau_fifo_context_rd32,
363 .wr32 = _nouveau_fifo_context_wr32,
364 },
365};
366
367/*******************************************************************************
368 * PFIFO engine
369 ******************************************************************************/
370
/* Decode tables for fault/interrupt reporting.  unit/hubclient/gpcclient
 * are intentionally empty at this point — unknown values are printed raw
 * by nouveau_enum_print(); only the fault reasons and the handled subfifo
 * interrupt bits are named. */
371static const struct nouveau_enum nve0_fifo_fault_unit[] = {
372 {}
373};
374
375static const struct nouveau_enum nve0_fifo_fault_reason[] = {
376 { 0x00, "PT_NOT_PRESENT" },
377 { 0x01, "PT_TOO_SHORT" },
378 { 0x02, "PAGE_NOT_PRESENT" },
379 { 0x03, "VM_LIMIT_EXCEEDED" },
380 { 0x04, "NO_CHANNEL" },
381 { 0x05, "PAGE_SYSTEM_ONLY" },
382 { 0x06, "PAGE_READ_ONLY" },
383 { 0x0a, "COMPRESSED_SYSRAM" },
384 { 0x0c, "INVALID_STORAGE_TYPE" },
385 {}
386};
387
388static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
389 {}
390};
391
392static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
393 {}
394};
395
396static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
397 { 0x00200000, "ILLEGAL_MTHD" },
398 { 0x00800000, "EMPTY_SUBC" },
399 {}
400};
401
/* Report a VM fault for one fault unit: reads the fault instance, virtual
 * address (lo/hi) and status registers at 0x2800+unit*0x10 and logs a
 * decoded message.  Bit 7 of status distinguishes write from read faults;
 * bit 6 selects hub vs GPC client decoding.  Reporting only — no recovery.
 */
402static void
403nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
404{
405 u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
406 u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
407 u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
408 u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
409 u32 client = (stat & 0x00001f00) >> 8;
410
411 nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
412 "write" : "read", (u64)vahi << 32 | valo);
413 nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
414 printk("] from ");
415 nouveau_enum_print(nve0_fifo_fault_unit, unit);
416 if (stat & 0x00000040) {
417 printk("/");
418 nouveau_enum_print(nve0_fifo_fault_hubclient, client);
419 } else {
420 printk("/GPC%d/", (stat & 0x1f000000) >> 24);
421 nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
422 }
423 printk(" on channel 0x%010llx\n", (u64)inst << 12);
424}
425
/* Forward a software method to the channel's SW-class (0x906e) object.
 *
 * Looks up the channel by chid under the fifo lock, finds the bound 0x906e
 * object in its namedb, and invokes `mthd` with `data` on it (mthd == 0 is
 * treated as handled without a call).  Returns 0 if handled, -EINVAL if
 * there is no such channel/object or the call rejects the method.
 */
426static int
427nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
428{
429 struct nve0_fifo_chan *chan = NULL;
430 struct nouveau_handle *bind;
431 unsigned long flags;
432 int ret = -EINVAL;
433
434 spin_lock_irqsave(&priv->base.lock, flags);
435 if (likely(chid >= priv->base.min && chid <= priv->base.max))
436 chan = (void *)priv->base.channel[chid];
437 if (unlikely(!chan))
438 goto out;
439
440 bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
441 if (likely(bind)) {
442 if (!mthd || !nv_call(bind->object, mthd, data))
443 ret = 0;
444 nouveau_namedb_put(bind);
445 }
446
447out:
448 spin_unlock_irqrestore(&priv->base.lock, flags);
449 return ret;
450}
451
/* Handle one PSUBFIFO unit's interrupt.
 *
 * ILLEGAL_MTHD (0x00200000) on method 0x0054 and EMPTY_SUBC (0x00800000)
 * are routed to the software-method handler; any bits that remain
 * unhandled are logged.  Finally the method FIFO is popped (0x0400c0) and
 * the interrupt status acked.
 */
452static void
453nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
454{
455 u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
456 u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
457 u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
458 u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
459 u32 subc = (addr & 0x00070000) >> 16;
460 u32 mthd = (addr & 0x00003ffc);
461 u32 show = stat;
462
463 if (stat & 0x00200000) {
464 if (mthd == 0x0054) {
465 if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
466 show &= ~0x00200000;
467 }
468 }
469
470 if (stat & 0x00800000) {
471 if (!nve0_fifo_swmthd(priv, chid, mthd, data))
472 show &= ~0x00800000;
473 }
474
475 if (show) {
476 nv_error(priv, "SUBFIFO%d:", unit);
477 nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
478 printk("\n");
479 nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
480 "data 0x%08x\n",
481 unit, chid, subc, mthd, data);
482 }
483
484 nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
485 nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
486}
487
/* Top-level PFIFO interrupt handler.
 *
 * Dispatches on the masked status: 0x10000000 = VM faults (one bit per
 * fault unit in 0x00259c), 0x20000000 = subfifo interrupts (unit bits in
 * 0x0025a0); the 0x00000100 and 0x40000000 sources are acknowledged but
 * not understood.  Anything left unhandled is logged as fatal and the
 * interrupt enable mask is cleared to avoid an interrupt storm.
 */
488static void
489nve0_fifo_intr(struct nouveau_subdev *subdev)
490{
491 struct nve0_fifo_priv *priv = (void *)subdev;
492 u32 mask = nv_rd32(priv, 0x002140);
493 u32 stat = nv_rd32(priv, 0x002100) & mask;
494
495 if (stat & 0x00000100) {
496 nv_warn(priv, "unknown status 0x00000100\n");
497 nv_wr32(priv, 0x002100, 0x00000100);
498 stat &= ~0x00000100;
499 }
500
501 if (stat & 0x10000000) {
502 u32 units = nv_rd32(priv, 0x00259c);
503 u32 u = units;
504
505 while (u) {
506 int i = ffs(u) - 1;
507 nve0_fifo_isr_vm_fault(priv, i);
508 u &= ~(1 << i);
509 }
510
511 nv_wr32(priv, 0x00259c, units);
512 stat &= ~0x10000000;
513 }
514
515 if (stat & 0x20000000) {
516 u32 units = nv_rd32(priv, 0x0025a0);
517 u32 u = units;
518
519 while (u) {
520 int i = ffs(u) - 1;
521 nve0_fifo_isr_subfifo_intr(priv, i);
522 u &= ~(1 << i);
523 }
524
525 nv_wr32(priv, 0x0025a0, units);
526 stat &= ~0x20000000;
527 }
528
529 if (stat & 0x40000000) {
530 nv_warn(priv, "unknown status 0x40000000\n");
/* Read-modify-write with a zero mask: acks via the side effect of
 * touching 0x002a00 without changing any bits. */
531 nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
532 stat &= ~0x40000000;
533 }
534
535 if (stat) {
536 nv_fatal(priv, "unhandled status 0x%08x\n", stat);
537 nv_wr32(priv, 0x002100, stat);
538 nv_wr32(priv, 0x002140, 0);
539 }
540}
541
/* PFIFO engine constructor: creates the base fifo with 4096 channels
 * (0..4095), allocates the usermem object (0x200 bytes per channel) and
 * maps it through BAR, then wires up the interrupt handler and classes.
 * Partially-constructed state on error is cleaned up by the dtor. */
542static int
543nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
544 struct nouveau_oclass *oclass, void *data, u32 size,
545 struct nouveau_object **pobject)
546{
547 struct nve0_fifo_priv *priv;
548 int ret;
549
550 ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
551 *pobject = nv_object(priv);
552 if (ret)
553 return ret;
554
555 ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000,
556 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
557 if (ret)
558 return ret;
559
560 ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
561 &priv->user.bar);
562 if (ret)
563 return ret;
564
565 nv_subdev(priv)->unit = 0x00000100;
566 nv_subdev(priv)->intr = nve0_fifo_intr;
567 nv_engine(priv)->cclass = &nve0_fifo_cclass;
568 nv_engine(priv)->sclass = nve0_fifo_sclass;
569 return 0;
570}
571
/* Engine destructor: unmap and release usermem, drop both playlist
 * buffers of every runlist (refs on NULL objects are no-ops), then
 * destroy the base fifo. */
572static void
573nve0_fifo_dtor(struct nouveau_object *object)
574{
575 struct nve0_fifo_priv *priv = (void *)object;
576 int i;
577
578 nouveau_gpuobj_unmap(&priv->user.bar);
579 nouveau_gpuobj_ref(NULL, &priv->user.mem);
580
581 for (i = 0; i < ARRAY_SIZE(priv->engine); i++) {
582 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
583 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
584 }
585
586 nouveau_fifo_destroy(&priv->base);
587}
588
/* One-time PFIFO bring-up for NVE0 (Kepler).  Unlike NVC0 there is no
 * engine-to-subfifo routing step here: enable all PSUBFIFOs, reset their
 * interrupt state, publish the usermem BAR window and unmask interrupts.
 */
589static int
590nve0_fifo_init(struct nouveau_object *object)
591{
592 struct nve0_fifo_priv *priv = (void *)object;
593 int ret, i;
594
595 ret = nouveau_fifo_init(&priv->base);
596 if (ret)
597 return ret;
598
599 /* enable all available PSUBFIFOs */
600 nv_wr32(priv, 0x000204, 0xffffffff);
601 priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
602 nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
603
604 /* PSUBFIFO[n] */
605 for (i = 0; i < priv->spoon_nr; i++) {
606 nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
607 nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
608 nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
609 }
610
/* 0x002254: base of the per-channel user control area (BAR offset >> 12). */
611 nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
612
613 nv_wr32(priv, 0x002a00, 0xffffffff);
614 nv_wr32(priv, 0x002100, 0xffffffff);
615 nv_wr32(priv, 0x002140, 0xbfffffff);
616 return 0;
617}
618
/* Engine class descriptor registered with the nouveau core for NVE0 PFIFO;
 * fini is inherited from the generic fifo implementation. */
619struct nouveau_oclass
620nve0_fifo_oclass = {
621 .handle = NV_ENGINE(FIFO, 0xe0),
622 .ofuncs = &(struct nouveau_ofuncs) {
623 .ctor = nve0_fifo_ctor,
624 .dtor = nve0_fifo_dtor,
625 .init = nve0_fifo_init,
626 .fini = _nouveau_fifo_fini,
627 },
628};
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
index b0795ececbda..e1947013d3bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
@@ -2,7 +2,7 @@
2#define __NOUVEAU_GRCTX_H__ 2#define __NOUVEAU_GRCTX_H__
3 3
4struct nouveau_grctx { 4struct nouveau_grctx {
5 struct drm_device *dev; 5 struct nouveau_device *device;
6 6
7 enum { 7 enum {
8 NOUVEAU_GRCTX_PROG, 8 NOUVEAU_GRCTX_PROG,
@@ -10,18 +10,18 @@ struct nouveau_grctx {
10 } mode; 10 } mode;
11 void *data; 11 void *data;
12 12
13 uint32_t ctxprog_max; 13 u32 ctxprog_max;
14 uint32_t ctxprog_len; 14 u32 ctxprog_len;
15 uint32_t ctxprog_reg; 15 u32 ctxprog_reg;
16 int ctxprog_label[32]; 16 int ctxprog_label[32];
17 uint32_t ctxvals_pos; 17 u32 ctxvals_pos;
18 uint32_t ctxvals_base; 18 u32 ctxvals_base;
19}; 19};
20 20
21static inline void 21static inline void
22cp_out(struct nouveau_grctx *ctx, uint32_t inst) 22cp_out(struct nouveau_grctx *ctx, u32 inst)
23{ 23{
24 uint32_t *ctxprog = ctx->data; 24 u32 *ctxprog = ctx->data;
25 25
26 if (ctx->mode != NOUVEAU_GRCTX_PROG) 26 if (ctx->mode != NOUVEAU_GRCTX_PROG)
27 return; 27 return;
@@ -31,13 +31,13 @@ cp_out(struct nouveau_grctx *ctx, uint32_t inst)
31} 31}
32 32
33static inline void 33static inline void
34cp_lsr(struct nouveau_grctx *ctx, uint32_t val) 34cp_lsr(struct nouveau_grctx *ctx, u32 val)
35{ 35{
36 cp_out(ctx, CP_LOAD_SR | val); 36 cp_out(ctx, CP_LOAD_SR | val);
37} 37}
38 38
39static inline void 39static inline void
40cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length) 40cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length)
41{ 41{
42 ctx->ctxprog_reg = (reg - 0x00400000) >> 2; 42 ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
43 43
@@ -55,7 +55,7 @@ cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
55static inline void 55static inline void
56cp_name(struct nouveau_grctx *ctx, int name) 56cp_name(struct nouveau_grctx *ctx, int name)
57{ 57{
58 uint32_t *ctxprog = ctx->data; 58 u32 *ctxprog = ctx->data;
59 int i; 59 int i;
60 60
61 if (ctx->mode != NOUVEAU_GRCTX_PROG) 61 if (ctx->mode != NOUVEAU_GRCTX_PROG)
@@ -115,7 +115,7 @@ cp_pos(struct nouveau_grctx *ctx, int offset)
115} 115}
116 116
117static inline void 117static inline void
118gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val) 118gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val)
119{ 119{
120 if (ctx->mode != NOUVEAU_GRCTX_VALS) 120 if (ctx->mode != NOUVEAU_GRCTX_VALS)
121 return; 121 return;
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
index be0a74750fb1..e45035efb8ca 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
@@ -22,6 +22,8 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/gpuobj.h>
26
25/* NVIDIA context programs handle a number of other conditions which are 27/* NVIDIA context programs handle a number of other conditions which are
26 * not implemented in our versions. It's not clear why NVIDIA context 28 * not implemented in our versions. It's not clear why NVIDIA context
27 * programs have this code, nor whether it's strictly necessary for 29 * programs have this code, nor whether it's strictly necessary for
@@ -109,20 +111,18 @@
109#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */ 111#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
110#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */ 112#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
111 113
112#include "drmP.h" 114#include "nv40.h"
113#include "nouveau_drv.h" 115#include "ctx.h"
114#include "nouveau_grctx.h"
115 116
116/* TODO: 117/* TODO:
117 * - get vs count from 0x1540 118 * - get vs count from 0x1540
118 */ 119 */
119 120
120static int 121static int
121nv40_graph_vs_count(struct drm_device *dev) 122nv40_graph_vs_count(struct nouveau_device *device)
122{ 123{
123 struct drm_nouveau_private *dev_priv = dev->dev_private;
124 124
125 switch (dev_priv->chipset) { 125 switch (device->chipset) {
126 case 0x47: 126 case 0x47:
127 case 0x49: 127 case 0x49:
128 case 0x4b: 128 case 0x4b:
@@ -160,7 +160,7 @@ enum cp_label {
160static void 160static void
161nv40_graph_construct_general(struct nouveau_grctx *ctx) 161nv40_graph_construct_general(struct nouveau_grctx *ctx)
162{ 162{
163 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 163 struct nouveau_device *device = ctx->device;
164 int i; 164 int i;
165 165
166 cp_ctx(ctx, 0x4000a4, 1); 166 cp_ctx(ctx, 0x4000a4, 1);
@@ -187,7 +187,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
187 cp_ctx(ctx, 0x400724, 1); 187 cp_ctx(ctx, 0x400724, 1);
188 gr_def(ctx, 0x400724, 0x02008821); 188 gr_def(ctx, 0x400724, 0x02008821);
189 cp_ctx(ctx, 0x400770, 3); 189 cp_ctx(ctx, 0x400770, 3);
190 if (dev_priv->chipset == 0x40) { 190 if (device->chipset == 0x40) {
191 cp_ctx(ctx, 0x400814, 4); 191 cp_ctx(ctx, 0x400814, 4);
192 cp_ctx(ctx, 0x400828, 5); 192 cp_ctx(ctx, 0x400828, 5);
193 cp_ctx(ctx, 0x400840, 5); 193 cp_ctx(ctx, 0x400840, 5);
@@ -208,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
208 gr_def(ctx, 0x4009dc, 0x80000000); 208 gr_def(ctx, 0x4009dc, 0x80000000);
209 } else { 209 } else {
210 cp_ctx(ctx, 0x400840, 20); 210 cp_ctx(ctx, 0x400840, 20);
211 if (nv44_graph_class(ctx->dev)) { 211 if (nv44_graph_class(ctx->device)) {
212 for (i = 0; i < 8; i++) 212 for (i = 0; i < 8; i++)
213 gr_def(ctx, 0x400860 + (i * 4), 0x00000001); 213 gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
214 } 214 }
@@ -217,21 +217,21 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
217 gr_def(ctx, 0x400888, 0x00000040); 217 gr_def(ctx, 0x400888, 0x00000040);
218 cp_ctx(ctx, 0x400894, 11); 218 cp_ctx(ctx, 0x400894, 11);
219 gr_def(ctx, 0x400894, 0x00000040); 219 gr_def(ctx, 0x400894, 0x00000040);
220 if (!nv44_graph_class(ctx->dev)) { 220 if (!nv44_graph_class(ctx->device)) {
221 for (i = 0; i < 8; i++) 221 for (i = 0; i < 8; i++)
222 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000); 222 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
223 } 223 }
224 cp_ctx(ctx, 0x4008e0, 2); 224 cp_ctx(ctx, 0x4008e0, 2);
225 cp_ctx(ctx, 0x4008f8, 2); 225 cp_ctx(ctx, 0x4008f8, 2);
226 if (dev_priv->chipset == 0x4c || 226 if (device->chipset == 0x4c ||
227 (dev_priv->chipset & 0xf0) == 0x60) 227 (device->chipset & 0xf0) == 0x60)
228 cp_ctx(ctx, 0x4009f8, 1); 228 cp_ctx(ctx, 0x4009f8, 1);
229 } 229 }
230 cp_ctx(ctx, 0x400a00, 73); 230 cp_ctx(ctx, 0x400a00, 73);
231 gr_def(ctx, 0x400b0c, 0x0b0b0b0c); 231 gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
232 cp_ctx(ctx, 0x401000, 4); 232 cp_ctx(ctx, 0x401000, 4);
233 cp_ctx(ctx, 0x405004, 1); 233 cp_ctx(ctx, 0x405004, 1);
234 switch (dev_priv->chipset) { 234 switch (device->chipset) {
235 case 0x47: 235 case 0x47:
236 case 0x49: 236 case 0x49:
237 case 0x4b: 237 case 0x4b:
@@ -240,7 +240,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
240 break; 240 break;
241 default: 241 default:
242 cp_ctx(ctx, 0x403440, 1); 242 cp_ctx(ctx, 0x403440, 1);
243 switch (dev_priv->chipset) { 243 switch (device->chipset) {
244 case 0x40: 244 case 0x40:
245 gr_def(ctx, 0x403440, 0x00000010); 245 gr_def(ctx, 0x403440, 0x00000010);
246 break; 246 break;
@@ -266,19 +266,19 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
266static void 266static void
267nv40_graph_construct_state3d(struct nouveau_grctx *ctx) 267nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
268{ 268{
269 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 269 struct nouveau_device *device = ctx->device;
270 int i; 270 int i;
271 271
272 if (dev_priv->chipset == 0x40) { 272 if (device->chipset == 0x40) {
273 cp_ctx(ctx, 0x401880, 51); 273 cp_ctx(ctx, 0x401880, 51);
274 gr_def(ctx, 0x401940, 0x00000100); 274 gr_def(ctx, 0x401940, 0x00000100);
275 } else 275 } else
276 if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 || 276 if (device->chipset == 0x46 || device->chipset == 0x47 ||
277 dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) { 277 device->chipset == 0x49 || device->chipset == 0x4b) {
278 cp_ctx(ctx, 0x401880, 32); 278 cp_ctx(ctx, 0x401880, 32);
279 for (i = 0; i < 16; i++) 279 for (i = 0; i < 16; i++)
280 gr_def(ctx, 0x401880 + (i * 4), 0x00000111); 280 gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
281 if (dev_priv->chipset == 0x46) 281 if (device->chipset == 0x46)
282 cp_ctx(ctx, 0x401900, 16); 282 cp_ctx(ctx, 0x401900, 16);
283 cp_ctx(ctx, 0x401940, 3); 283 cp_ctx(ctx, 0x401940, 3);
284 } 284 }
@@ -289,7 +289,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
289 gr_def(ctx, 0x401978, 0xffff0000); 289 gr_def(ctx, 0x401978, 0xffff0000);
290 gr_def(ctx, 0x40197c, 0x00000001); 290 gr_def(ctx, 0x40197c, 0x00000001);
291 gr_def(ctx, 0x401990, 0x46400000); 291 gr_def(ctx, 0x401990, 0x46400000);
292 if (dev_priv->chipset == 0x40) { 292 if (device->chipset == 0x40) {
293 cp_ctx(ctx, 0x4019a0, 2); 293 cp_ctx(ctx, 0x4019a0, 2);
294 cp_ctx(ctx, 0x4019ac, 5); 294 cp_ctx(ctx, 0x4019ac, 5);
295 } else { 295 } else {
@@ -297,7 +297,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
297 cp_ctx(ctx, 0x4019b4, 3); 297 cp_ctx(ctx, 0x4019b4, 3);
298 } 298 }
299 gr_def(ctx, 0x4019bc, 0xffff0000); 299 gr_def(ctx, 0x4019bc, 0xffff0000);
300 switch (dev_priv->chipset) { 300 switch (device->chipset) {
301 case 0x46: 301 case 0x46:
302 case 0x47: 302 case 0x47:
303 case 0x49: 303 case 0x49:
@@ -316,7 +316,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
316 for (i = 0; i < 16; i++) 316 for (i = 0; i < 16; i++)
317 gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000); 317 gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
318 gr_def(ctx, 0x401a8c, 0x4b7fffff); 318 gr_def(ctx, 0x401a8c, 0x4b7fffff);
319 if (dev_priv->chipset == 0x40) { 319 if (device->chipset == 0x40) {
320 cp_ctx(ctx, 0x401ab8, 3); 320 cp_ctx(ctx, 0x401ab8, 3);
321 } else { 321 } else {
322 cp_ctx(ctx, 0x401ab8, 1); 322 cp_ctx(ctx, 0x401ab8, 1);
@@ -327,10 +327,10 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
327 gr_def(ctx, 0x401ad4, 0x70605040); 327 gr_def(ctx, 0x401ad4, 0x70605040);
328 gr_def(ctx, 0x401ad8, 0xb8a89888); 328 gr_def(ctx, 0x401ad8, 0xb8a89888);
329 gr_def(ctx, 0x401adc, 0xf8e8d8c8); 329 gr_def(ctx, 0x401adc, 0xf8e8d8c8);
330 cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1); 330 cp_ctx(ctx, 0x401b10, device->chipset == 0x40 ? 2 : 1);
331 gr_def(ctx, 0x401b10, 0x40100000); 331 gr_def(ctx, 0x401b10, 0x40100000);
332 cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5); 332 cp_ctx(ctx, 0x401b18, device->chipset == 0x40 ? 6 : 5);
333 gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ? 333 gr_def(ctx, 0x401b28, device->chipset == 0x40 ?
334 0x00000004 : 0x00000000); 334 0x00000004 : 0x00000000);
335 cp_ctx(ctx, 0x401b30, 25); 335 cp_ctx(ctx, 0x401b30, 25);
336 gr_def(ctx, 0x401b34, 0x0000ffff); 336 gr_def(ctx, 0x401b34, 0x0000ffff);
@@ -341,8 +341,8 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
341 gr_def(ctx, 0x401b84, 0xffffffff); 341 gr_def(ctx, 0x401b84, 0xffffffff);
342 gr_def(ctx, 0x401b88, 0x00ff7000); 342 gr_def(ctx, 0x401b88, 0x00ff7000);
343 gr_def(ctx, 0x401b8c, 0x0000ffff); 343 gr_def(ctx, 0x401b8c, 0x0000ffff);
344 if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a && 344 if (device->chipset != 0x44 && device->chipset != 0x4a &&
345 dev_priv->chipset != 0x4e) 345 device->chipset != 0x4e)
346 cp_ctx(ctx, 0x401b94, 1); 346 cp_ctx(ctx, 0x401b94, 1);
347 cp_ctx(ctx, 0x401b98, 8); 347 cp_ctx(ctx, 0x401b98, 8);
348 gr_def(ctx, 0x401b9c, 0x00ff0000); 348 gr_def(ctx, 0x401b9c, 0x00ff0000);
@@ -371,12 +371,12 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
371static void 371static void
372nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx) 372nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
373{ 373{
374 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 374 struct nouveau_device *device = ctx->device;
375 int i; 375 int i;
376 376
377 cp_ctx(ctx, 0x402000, 1); 377 cp_ctx(ctx, 0x402000, 1);
378 cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2); 378 cp_ctx(ctx, 0x402404, device->chipset == 0x40 ? 1 : 2);
379 switch (dev_priv->chipset) { 379 switch (device->chipset) {
380 case 0x40: 380 case 0x40:
381 gr_def(ctx, 0x402404, 0x00000001); 381 gr_def(ctx, 0x402404, 0x00000001);
382 break; 382 break;
@@ -393,9 +393,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
393 default: 393 default:
394 gr_def(ctx, 0x402404, 0x00000021); 394 gr_def(ctx, 0x402404, 0x00000021);
395 } 395 }
396 if (dev_priv->chipset != 0x40) 396 if (device->chipset != 0x40)
397 gr_def(ctx, 0x402408, 0x030c30c3); 397 gr_def(ctx, 0x402408, 0x030c30c3);
398 switch (dev_priv->chipset) { 398 switch (device->chipset) {
399 case 0x44: 399 case 0x44:
400 case 0x46: 400 case 0x46:
401 case 0x4a: 401 case 0x4a:
@@ -408,10 +408,10 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
408 default: 408 default:
409 break; 409 break;
410 } 410 }
411 cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9); 411 cp_ctx(ctx, 0x402480, device->chipset == 0x40 ? 8 : 9);
412 gr_def(ctx, 0x402488, 0x3e020200); 412 gr_def(ctx, 0x402488, 0x3e020200);
413 gr_def(ctx, 0x40248c, 0x00ffffff); 413 gr_def(ctx, 0x40248c, 0x00ffffff);
414 switch (dev_priv->chipset) { 414 switch (device->chipset) {
415 case 0x40: 415 case 0x40:
416 gr_def(ctx, 0x402490, 0x60103f00); 416 gr_def(ctx, 0x402490, 0x60103f00);
417 break; 417 break;
@@ -428,16 +428,16 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
428 gr_def(ctx, 0x402490, 0x0c103f00); 428 gr_def(ctx, 0x402490, 0x0c103f00);
429 break; 429 break;
430 } 430 }
431 gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ? 431 gr_def(ctx, 0x40249c, device->chipset <= 0x43 ?
432 0x00020000 : 0x00040000); 432 0x00020000 : 0x00040000);
433 cp_ctx(ctx, 0x402500, 31); 433 cp_ctx(ctx, 0x402500, 31);
434 gr_def(ctx, 0x402530, 0x00008100); 434 gr_def(ctx, 0x402530, 0x00008100);
435 if (dev_priv->chipset == 0x40) 435 if (device->chipset == 0x40)
436 cp_ctx(ctx, 0x40257c, 6); 436 cp_ctx(ctx, 0x40257c, 6);
437 cp_ctx(ctx, 0x402594, 16); 437 cp_ctx(ctx, 0x402594, 16);
438 cp_ctx(ctx, 0x402800, 17); 438 cp_ctx(ctx, 0x402800, 17);
439 gr_def(ctx, 0x402800, 0x00000001); 439 gr_def(ctx, 0x402800, 0x00000001);
440 switch (dev_priv->chipset) { 440 switch (device->chipset) {
441 case 0x47: 441 case 0x47:
442 case 0x49: 442 case 0x49:
443 case 0x4b: 443 case 0x4b:
@@ -445,7 +445,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
445 gr_def(ctx, 0x402864, 0x00001001); 445 gr_def(ctx, 0x402864, 0x00001001);
446 cp_ctx(ctx, 0x402870, 3); 446 cp_ctx(ctx, 0x402870, 3);
447 gr_def(ctx, 0x402878, 0x00000003); 447 gr_def(ctx, 0x402878, 0x00000003);
448 if (dev_priv->chipset != 0x47) { /* belong at end!! */ 448 if (device->chipset != 0x47) { /* belong at end!! */
449 cp_ctx(ctx, 0x402900, 1); 449 cp_ctx(ctx, 0x402900, 1);
450 cp_ctx(ctx, 0x402940, 1); 450 cp_ctx(ctx, 0x402940, 1);
451 cp_ctx(ctx, 0x402980, 1); 451 cp_ctx(ctx, 0x402980, 1);
@@ -470,9 +470,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
470 } 470 }
471 471
472 cp_ctx(ctx, 0x402c00, 4); 472 cp_ctx(ctx, 0x402c00, 4);
473 gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ? 473 gr_def(ctx, 0x402c00, device->chipset == 0x40 ?
474 0x80800001 : 0x00888001); 474 0x80800001 : 0x00888001);
475 switch (dev_priv->chipset) { 475 switch (device->chipset) {
476 case 0x47: 476 case 0x47:
477 case 0x49: 477 case 0x49:
478 case 0x4b: 478 case 0x4b:
@@ -485,30 +485,30 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
485 break; 485 break;
486 default: 486 default:
487 cp_ctx(ctx, 0x402c10, 4); 487 cp_ctx(ctx, 0x402c10, 4);
488 if (dev_priv->chipset == 0x40) 488 if (device->chipset == 0x40)
489 cp_ctx(ctx, 0x402c20, 36); 489 cp_ctx(ctx, 0x402c20, 36);
490 else 490 else
491 if (dev_priv->chipset <= 0x42) 491 if (device->chipset <= 0x42)
492 cp_ctx(ctx, 0x402c20, 24); 492 cp_ctx(ctx, 0x402c20, 24);
493 else 493 else
494 if (dev_priv->chipset <= 0x4a) 494 if (device->chipset <= 0x4a)
495 cp_ctx(ctx, 0x402c20, 16); 495 cp_ctx(ctx, 0x402c20, 16);
496 else 496 else
497 cp_ctx(ctx, 0x402c20, 8); 497 cp_ctx(ctx, 0x402c20, 8);
498 cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13); 498 cp_ctx(ctx, 0x402cb0, device->chipset == 0x40 ? 12 : 13);
499 gr_def(ctx, 0x402cd4, 0x00000005); 499 gr_def(ctx, 0x402cd4, 0x00000005);
500 if (dev_priv->chipset != 0x40) 500 if (device->chipset != 0x40)
501 gr_def(ctx, 0x402ce0, 0x0000ffff); 501 gr_def(ctx, 0x402ce0, 0x0000ffff);
502 break; 502 break;
503 } 503 }
504 504
505 cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3); 505 cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3);
506 cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3); 506 cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3);
507 cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev)); 507 cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->device));
508 for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++) 508 for (i = 0; i < nv40_graph_vs_count(ctx->device); i++)
509 gr_def(ctx, 0x403420 + (i * 4), 0x00005555); 509 gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
510 510
511 if (dev_priv->chipset != 0x40) { 511 if (device->chipset != 0x40) {
512 cp_ctx(ctx, 0x403600, 1); 512 cp_ctx(ctx, 0x403600, 1);
513 gr_def(ctx, 0x403600, 0x00000001); 513 gr_def(ctx, 0x403600, 0x00000001);
514 } 514 }
@@ -516,7 +516,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
516 516
517 cp_ctx(ctx, 0x403c18, 1); 517 cp_ctx(ctx, 0x403c18, 1);
518 gr_def(ctx, 0x403c18, 0x00000001); 518 gr_def(ctx, 0x403c18, 0x00000001);
519 switch (dev_priv->chipset) { 519 switch (device->chipset) {
520 case 0x46: 520 case 0x46:
521 case 0x47: 521 case 0x47:
522 case 0x49: 522 case 0x49:
@@ -527,7 +527,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
527 gr_def(ctx, 0x405c24, 0x000e3000); 527 gr_def(ctx, 0x405c24, 0x000e3000);
528 break; 528 break;
529 } 529 }
530 if (dev_priv->chipset != 0x4e) 530 if (device->chipset != 0x4e)
531 cp_ctx(ctx, 0x405800, 11); 531 cp_ctx(ctx, 0x405800, 11);
532 cp_ctx(ctx, 0x407000, 1); 532 cp_ctx(ctx, 0x407000, 1);
533} 533}
@@ -535,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
535static void 535static void
536nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx) 536nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
537{ 537{
538 int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684; 538 int len = nv44_graph_class(ctx->device) ? 0x0084 : 0x0684;
539 539
540 cp_out (ctx, 0x300000); 540 cp_out (ctx, 0x300000);
541 cp_lsr (ctx, len - 4); 541 cp_lsr (ctx, len - 4);
@@ -550,32 +550,31 @@ nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
550static void 550static void
551nv40_graph_construct_shader(struct nouveau_grctx *ctx) 551nv40_graph_construct_shader(struct nouveau_grctx *ctx)
552{ 552{
553 struct drm_device *dev = ctx->dev; 553 struct nouveau_device *device = ctx->device;
554 struct drm_nouveau_private *dev_priv = dev->dev_private;
555 struct nouveau_gpuobj *obj = ctx->data; 554 struct nouveau_gpuobj *obj = ctx->data;
556 int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset; 555 int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
557 int offset, i; 556 int offset, i;
558 557
559 vs_nr = nv40_graph_vs_count(ctx->dev); 558 vs_nr = nv40_graph_vs_count(ctx->device);
560 vs_nr_b0 = 363; 559 vs_nr_b0 = 363;
561 vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64; 560 vs_nr_b1 = device->chipset == 0x40 ? 128 : 64;
562 if (dev_priv->chipset == 0x40) { 561 if (device->chipset == 0x40) {
563 b0_offset = 0x2200/4; /* 33a0 */ 562 b0_offset = 0x2200/4; /* 33a0 */
564 b1_offset = 0x55a0/4; /* 1500 */ 563 b1_offset = 0x55a0/4; /* 1500 */
565 vs_len = 0x6aa0/4; 564 vs_len = 0x6aa0/4;
566 } else 565 } else
567 if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) { 566 if (device->chipset == 0x41 || device->chipset == 0x42) {
568 b0_offset = 0x2200/4; /* 2200 */ 567 b0_offset = 0x2200/4; /* 2200 */
569 b1_offset = 0x4400/4; /* 0b00 */ 568 b1_offset = 0x4400/4; /* 0b00 */
570 vs_len = 0x4f00/4; 569 vs_len = 0x4f00/4;
571 } else { 570 } else {
572 b0_offset = 0x1d40/4; /* 2200 */ 571 b0_offset = 0x1d40/4; /* 2200 */
573 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */ 572 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
574 vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4; 573 vs_len = nv44_graph_class(device) ? 0x4980/4 : 0x4a40/4;
575 } 574 }
576 575
577 cp_lsr(ctx, vs_len * vs_nr + 0x300/4); 576 cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
578 cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041); 577 cp_out(ctx, nv44_graph_class(device) ? 0x800029 : 0x800041);
579 578
580 offset = ctx->ctxvals_pos; 579 offset = ctx->ctxvals_pos;
581 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len)); 580 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
@@ -661,21 +660,21 @@ nv40_grctx_generate(struct nouveau_grctx *ctx)
661} 660}
662 661
663void 662void
664nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem) 663nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
665{ 664{
666 nv40_grctx_generate(&(struct nouveau_grctx) { 665 nv40_grctx_generate(&(struct nouveau_grctx) {
667 .dev = dev, 666 .device = device,
668 .mode = NOUVEAU_GRCTX_VALS, 667 .mode = NOUVEAU_GRCTX_VALS,
669 .data = mem, 668 .data = mem,
670 }); 669 });
671} 670}
672 671
673void 672void
674nv40_grctx_init(struct drm_device *dev, u32 *size) 673nv40_grctx_init(struct nouveau_device *device, u32 *size)
675{ 674{
676 u32 ctxprog[256], i; 675 u32 ctxprog[256], i;
677 struct nouveau_grctx ctx = { 676 struct nouveau_grctx ctx = {
678 .dev = dev, 677 .device = device,
679 .mode = NOUVEAU_GRCTX_PROG, 678 .mode = NOUVEAU_GRCTX_PROG,
680 .data = ctxprog, 679 .data = ctxprog,
681 .ctxprog_max = ARRAY_SIZE(ctxprog) 680 .ctxprog_max = ARRAY_SIZE(ctxprog)
@@ -683,8 +682,8 @@ nv40_grctx_init(struct drm_device *dev, u32 *size)
683 682
684 nv40_grctx_generate(&ctx); 683 nv40_grctx_generate(&ctx);
685 684
686 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); 685 nv_wr32(device, 0x400324, 0);
687 for (i = 0; i < ctx.ctxprog_len; i++) 686 for (i = 0; i < ctx.ctxprog_len; i++)
688 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]); 687 nv_wr32(device, 0x400328, ctxprog[i]);
689 *size = ctx.ctxvals_pos * 4; 688 *size = ctx.ctxvals_pos * 4;
690} 689}
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
index 881e22b249fc..552fdbd45ebe 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
@@ -20,6 +20,8 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23#include <core/gpuobj.h>
24
23#define CP_FLAG_CLEAR 0 25#define CP_FLAG_CLEAR 0
24#define CP_FLAG_SET 1 26#define CP_FLAG_SET 1
25#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0) 27#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
@@ -105,9 +107,8 @@
105#define CP_SEEK_1 0x00c000ff 107#define CP_SEEK_1 0x00c000ff
106#define CP_SEEK_2 0x00c800ff 108#define CP_SEEK_2 0x00c800ff
107 109
108#include "drmP.h" 110#include "nv50.h"
109#include "nouveau_drv.h" 111#include "ctx.h"
110#include "nouveau_grctx.h"
111 112
112#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) 113#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
113#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac) 114#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
@@ -175,32 +176,6 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
175static int 176static int
176nv50_grctx_generate(struct nouveau_grctx *ctx) 177nv50_grctx_generate(struct nouveau_grctx *ctx)
177{ 178{
178 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
179
180 switch (dev_priv->chipset) {
181 case 0x50:
182 case 0x84:
183 case 0x86:
184 case 0x92:
185 case 0x94:
186 case 0x96:
187 case 0x98:
188 case 0xa0:
189 case 0xa3:
190 case 0xa5:
191 case 0xa8:
192 case 0xaa:
193 case 0xac:
194 case 0xaf:
195 break;
196 default:
197 NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
198 "your NV%x card.\n", dev_priv->chipset);
199 NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
200 "the devs.\n");
201 return -ENOSYS;
202 }
203
204 cp_set (ctx, STATE, RUNNING); 179 cp_set (ctx, STATE, RUNNING);
205 cp_set (ctx, XFER_SWITCH, ENABLE); 180 cp_set (ctx, XFER_SWITCH, ENABLE);
206 /* decide whether we're loading/unloading the context */ 181 /* decide whether we're loading/unloading the context */
@@ -278,30 +253,36 @@ nv50_grctx_generate(struct nouveau_grctx *ctx)
278} 253}
279 254
280void 255void
281nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem) 256nv50_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
282{ 257{
283 nv50_grctx_generate(&(struct nouveau_grctx) { 258 nv50_grctx_generate(&(struct nouveau_grctx) {
284 .dev = dev, 259 .device = device,
285 .mode = NOUVEAU_GRCTX_VALS, 260 .mode = NOUVEAU_GRCTX_VALS,
286 .data = mem, 261 .data = mem,
287 }); 262 });
288} 263}
289 264
290int 265int
291nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt) 266nv50_grctx_init(struct nouveau_device *device, u32 *size)
292{ 267{
268 u32 *ctxprog = kmalloc(512 * 4, GFP_KERNEL), i;
293 struct nouveau_grctx ctx = { 269 struct nouveau_grctx ctx = {
294 .dev = dev, 270 .device = device,
295 .mode = NOUVEAU_GRCTX_PROG, 271 .mode = NOUVEAU_GRCTX_PROG,
296 .data = data, 272 .data = ctxprog,
297 .ctxprog_max = max 273 .ctxprog_max = 512,
298 }; 274 };
299 int ret;
300 275
301 ret = nv50_grctx_generate(&ctx); 276 if (!ctxprog)
302 *cnt = ctx.ctxvals_pos * 4; 277 return -ENOMEM;
303 *len = ctx.ctxprog_len; 278 nv50_grctx_generate(&ctx);
304 return ret; 279
280 nv_wr32(device, 0x400324, 0);
281 for (i = 0; i < ctx.ctxprog_len; i++)
282 nv_wr32(device, 0x400328, ctxprog[i]);
283 *size = ctx.ctxvals_pos * 4;
284 kfree(ctxprog);
285 return 0;
305} 286}
306 287
307/* 288/*
@@ -315,36 +296,36 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
315static void 296static void
316nv50_graph_construct_mmio(struct nouveau_grctx *ctx) 297nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
317{ 298{
318 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 299 struct nouveau_device *device = ctx->device;
319 int i, j; 300 int i, j;
320 int offset, base; 301 int offset, base;
321 uint32_t units = nv_rd32 (ctx->dev, 0x1540); 302 u32 units = nv_rd32 (ctx->device, 0x1540);
322 303
323 /* 0800: DISPATCH */ 304 /* 0800: DISPATCH */
324 cp_ctx(ctx, 0x400808, 7); 305 cp_ctx(ctx, 0x400808, 7);
325 gr_def(ctx, 0x400814, 0x00000030); 306 gr_def(ctx, 0x400814, 0x00000030);
326 cp_ctx(ctx, 0x400834, 0x32); 307 cp_ctx(ctx, 0x400834, 0x32);
327 if (dev_priv->chipset == 0x50) { 308 if (device->chipset == 0x50) {
328 gr_def(ctx, 0x400834, 0xff400040); 309 gr_def(ctx, 0x400834, 0xff400040);
329 gr_def(ctx, 0x400838, 0xfff00080); 310 gr_def(ctx, 0x400838, 0xfff00080);
330 gr_def(ctx, 0x40083c, 0xfff70090); 311 gr_def(ctx, 0x40083c, 0xfff70090);
331 gr_def(ctx, 0x400840, 0xffe806a8); 312 gr_def(ctx, 0x400840, 0xffe806a8);
332 } 313 }
333 gr_def(ctx, 0x400844, 0x00000002); 314 gr_def(ctx, 0x400844, 0x00000002);
334 if (IS_NVA3F(dev_priv->chipset)) 315 if (IS_NVA3F(device->chipset))
335 gr_def(ctx, 0x400894, 0x00001000); 316 gr_def(ctx, 0x400894, 0x00001000);
336 gr_def(ctx, 0x4008e8, 0x00000003); 317 gr_def(ctx, 0x4008e8, 0x00000003);
337 gr_def(ctx, 0x4008ec, 0x00001000); 318 gr_def(ctx, 0x4008ec, 0x00001000);
338 if (dev_priv->chipset == 0x50) 319 if (device->chipset == 0x50)
339 cp_ctx(ctx, 0x400908, 0xb); 320 cp_ctx(ctx, 0x400908, 0xb);
340 else if (dev_priv->chipset < 0xa0) 321 else if (device->chipset < 0xa0)
341 cp_ctx(ctx, 0x400908, 0xc); 322 cp_ctx(ctx, 0x400908, 0xc);
342 else 323 else
343 cp_ctx(ctx, 0x400908, 0xe); 324 cp_ctx(ctx, 0x400908, 0xe);
344 325
345 if (dev_priv->chipset >= 0xa0) 326 if (device->chipset >= 0xa0)
346 cp_ctx(ctx, 0x400b00, 0x1); 327 cp_ctx(ctx, 0x400b00, 0x1);
347 if (IS_NVA3F(dev_priv->chipset)) { 328 if (IS_NVA3F(device->chipset)) {
348 cp_ctx(ctx, 0x400b10, 0x1); 329 cp_ctx(ctx, 0x400b10, 0x1);
349 gr_def(ctx, 0x400b10, 0x0001629d); 330 gr_def(ctx, 0x400b10, 0x0001629d);
350 cp_ctx(ctx, 0x400b20, 0x1); 331 cp_ctx(ctx, 0x400b20, 0x1);
@@ -358,10 +339,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
358 gr_def(ctx, 0x400c08, 0x0000fe0c); 339 gr_def(ctx, 0x400c08, 0x0000fe0c);
359 340
360 /* 1000 */ 341 /* 1000 */
361 if (dev_priv->chipset < 0xa0) { 342 if (device->chipset < 0xa0) {
362 cp_ctx(ctx, 0x401008, 0x4); 343 cp_ctx(ctx, 0x401008, 0x4);
363 gr_def(ctx, 0x401014, 0x00001000); 344 gr_def(ctx, 0x401014, 0x00001000);
364 } else if (!IS_NVA3F(dev_priv->chipset)) { 345 } else if (!IS_NVA3F(device->chipset)) {
365 cp_ctx(ctx, 0x401008, 0x5); 346 cp_ctx(ctx, 0x401008, 0x5);
366 gr_def(ctx, 0x401018, 0x00001000); 347 gr_def(ctx, 0x401018, 0x00001000);
367 } else { 348 } else {
@@ -372,7 +353,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
372 /* 1400 */ 353 /* 1400 */
373 cp_ctx(ctx, 0x401400, 0x8); 354 cp_ctx(ctx, 0x401400, 0x8);
374 cp_ctx(ctx, 0x401424, 0x3); 355 cp_ctx(ctx, 0x401424, 0x3);
375 if (dev_priv->chipset == 0x50) 356 if (device->chipset == 0x50)
376 gr_def(ctx, 0x40142c, 0x0001fd87); 357 gr_def(ctx, 0x40142c, 0x0001fd87);
377 else 358 else
378 gr_def(ctx, 0x40142c, 0x00000187); 359 gr_def(ctx, 0x40142c, 0x00000187);
@@ -382,10 +363,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
382 /* 1800: STREAMOUT */ 363 /* 1800: STREAMOUT */
383 cp_ctx(ctx, 0x401814, 0x1); 364 cp_ctx(ctx, 0x401814, 0x1);
384 gr_def(ctx, 0x401814, 0x000000ff); 365 gr_def(ctx, 0x401814, 0x000000ff);
385 if (dev_priv->chipset == 0x50) { 366 if (device->chipset == 0x50) {
386 cp_ctx(ctx, 0x40181c, 0xe); 367 cp_ctx(ctx, 0x40181c, 0xe);
387 gr_def(ctx, 0x401850, 0x00000004); 368 gr_def(ctx, 0x401850, 0x00000004);
388 } else if (dev_priv->chipset < 0xa0) { 369 } else if (device->chipset < 0xa0) {
389 cp_ctx(ctx, 0x40181c, 0xf); 370 cp_ctx(ctx, 0x40181c, 0xf);
390 gr_def(ctx, 0x401854, 0x00000004); 371 gr_def(ctx, 0x401854, 0x00000004);
391 } else { 372 } else {
@@ -395,7 +376,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
395 376
396 /* 1C00 */ 377 /* 1C00 */
397 cp_ctx(ctx, 0x401c00, 0x1); 378 cp_ctx(ctx, 0x401c00, 0x1);
398 switch (dev_priv->chipset) { 379 switch (device->chipset) {
399 case 0x50: 380 case 0x50:
400 gr_def(ctx, 0x401c00, 0x0001005f); 381 gr_def(ctx, 0x401c00, 0x0001005f);
401 break; 382 break;
@@ -424,7 +405,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
424 405
425 /* 2400 */ 406 /* 2400 */
426 cp_ctx(ctx, 0x402400, 0x1); 407 cp_ctx(ctx, 0x402400, 0x1);
427 if (dev_priv->chipset == 0x50) 408 if (device->chipset == 0x50)
428 cp_ctx(ctx, 0x402408, 0x1); 409 cp_ctx(ctx, 0x402408, 0x1);
429 else 410 else
430 cp_ctx(ctx, 0x402408, 0x2); 411 cp_ctx(ctx, 0x402408, 0x2);
@@ -432,21 +413,21 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
432 413
433 /* 2800: CSCHED */ 414 /* 2800: CSCHED */
434 cp_ctx(ctx, 0x402800, 0x1); 415 cp_ctx(ctx, 0x402800, 0x1);
435 if (dev_priv->chipset == 0x50) 416 if (device->chipset == 0x50)
436 gr_def(ctx, 0x402800, 0x00000006); 417 gr_def(ctx, 0x402800, 0x00000006);
437 418
438 /* 2C00: ZCULL */ 419 /* 2C00: ZCULL */
439 cp_ctx(ctx, 0x402c08, 0x6); 420 cp_ctx(ctx, 0x402c08, 0x6);
440 if (dev_priv->chipset != 0x50) 421 if (device->chipset != 0x50)
441 gr_def(ctx, 0x402c14, 0x01000000); 422 gr_def(ctx, 0x402c14, 0x01000000);
442 gr_def(ctx, 0x402c18, 0x000000ff); 423 gr_def(ctx, 0x402c18, 0x000000ff);
443 if (dev_priv->chipset == 0x50) 424 if (device->chipset == 0x50)
444 cp_ctx(ctx, 0x402ca0, 0x1); 425 cp_ctx(ctx, 0x402ca0, 0x1);
445 else 426 else
446 cp_ctx(ctx, 0x402ca0, 0x2); 427 cp_ctx(ctx, 0x402ca0, 0x2);
447 if (dev_priv->chipset < 0xa0) 428 if (device->chipset < 0xa0)
448 gr_def(ctx, 0x402ca0, 0x00000400); 429 gr_def(ctx, 0x402ca0, 0x00000400);
449 else if (!IS_NVA3F(dev_priv->chipset)) 430 else if (!IS_NVA3F(device->chipset))
450 gr_def(ctx, 0x402ca0, 0x00000800); 431 gr_def(ctx, 0x402ca0, 0x00000800);
451 else 432 else
452 gr_def(ctx, 0x402ca0, 0x00000400); 433 gr_def(ctx, 0x402ca0, 0x00000400);
@@ -457,14 +438,14 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
457 gr_def(ctx, 0x403004, 0x00000001); 438 gr_def(ctx, 0x403004, 0x00000001);
458 439
459 /* 3400 */ 440 /* 3400 */
460 if (dev_priv->chipset >= 0xa0) { 441 if (device->chipset >= 0xa0) {
461 cp_ctx(ctx, 0x403404, 0x1); 442 cp_ctx(ctx, 0x403404, 0x1);
462 gr_def(ctx, 0x403404, 0x00000001); 443 gr_def(ctx, 0x403404, 0x00000001);
463 } 444 }
464 445
465 /* 5000: CCACHE */ 446 /* 5000: CCACHE */
466 cp_ctx(ctx, 0x405000, 0x1); 447 cp_ctx(ctx, 0x405000, 0x1);
467 switch (dev_priv->chipset) { 448 switch (device->chipset) {
468 case 0x50: 449 case 0x50:
469 gr_def(ctx, 0x405000, 0x00300080); 450 gr_def(ctx, 0x405000, 0x00300080);
470 break; 451 break;
@@ -493,22 +474,22 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
493 cp_ctx(ctx, 0x40502c, 0x1); 474 cp_ctx(ctx, 0x40502c, 0x1);
494 475
495 /* 6000? */ 476 /* 6000? */
496 if (dev_priv->chipset == 0x50) 477 if (device->chipset == 0x50)
497 cp_ctx(ctx, 0x4063e0, 0x1); 478 cp_ctx(ctx, 0x4063e0, 0x1);
498 479
499 /* 6800: M2MF */ 480 /* 6800: M2MF */
500 if (dev_priv->chipset < 0x90) { 481 if (device->chipset < 0x90) {
501 cp_ctx(ctx, 0x406814, 0x2b); 482 cp_ctx(ctx, 0x406814, 0x2b);
502 gr_def(ctx, 0x406818, 0x00000f80); 483 gr_def(ctx, 0x406818, 0x00000f80);
503 gr_def(ctx, 0x406860, 0x007f0080); 484 gr_def(ctx, 0x406860, 0x007f0080);
504 gr_def(ctx, 0x40689c, 0x007f0080); 485 gr_def(ctx, 0x40689c, 0x007f0080);
505 } else { 486 } else {
506 cp_ctx(ctx, 0x406814, 0x4); 487 cp_ctx(ctx, 0x406814, 0x4);
507 if (dev_priv->chipset == 0x98) 488 if (device->chipset == 0x98)
508 gr_def(ctx, 0x406818, 0x00000f80); 489 gr_def(ctx, 0x406818, 0x00000f80);
509 else 490 else
510 gr_def(ctx, 0x406818, 0x00001f80); 491 gr_def(ctx, 0x406818, 0x00001f80);
511 if (IS_NVA3F(dev_priv->chipset)) 492 if (IS_NVA3F(device->chipset))
512 gr_def(ctx, 0x40681c, 0x00000030); 493 gr_def(ctx, 0x40681c, 0x00000030);
513 cp_ctx(ctx, 0x406830, 0x3); 494 cp_ctx(ctx, 0x406830, 0x3);
514 } 495 }
@@ -517,43 +498,43 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
517 for (i = 0; i < 8; i++) { 498 for (i = 0; i < 8; i++) {
518 if (units & (1<<(i+16))) { 499 if (units & (1<<(i+16))) {
519 cp_ctx(ctx, 0x407000 + (i<<8), 3); 500 cp_ctx(ctx, 0x407000 + (i<<8), 3);
520 if (dev_priv->chipset == 0x50) 501 if (device->chipset == 0x50)
521 gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820); 502 gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
522 else if (dev_priv->chipset != 0xa5) 503 else if (device->chipset != 0xa5)
523 gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821); 504 gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
524 else 505 else
525 gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821); 506 gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
526 gr_def(ctx, 0x407004 + (i<<8), 0x89058001); 507 gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
527 508
528 if (dev_priv->chipset == 0x50) { 509 if (device->chipset == 0x50) {
529 cp_ctx(ctx, 0x407010 + (i<<8), 1); 510 cp_ctx(ctx, 0x407010 + (i<<8), 1);
530 } else if (dev_priv->chipset < 0xa0) { 511 } else if (device->chipset < 0xa0) {
531 cp_ctx(ctx, 0x407010 + (i<<8), 2); 512 cp_ctx(ctx, 0x407010 + (i<<8), 2);
532 gr_def(ctx, 0x407010 + (i<<8), 0x00001000); 513 gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
533 gr_def(ctx, 0x407014 + (i<<8), 0x0000001f); 514 gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
534 } else { 515 } else {
535 cp_ctx(ctx, 0x407010 + (i<<8), 3); 516 cp_ctx(ctx, 0x407010 + (i<<8), 3);
536 gr_def(ctx, 0x407010 + (i<<8), 0x00001000); 517 gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
537 if (dev_priv->chipset != 0xa5) 518 if (device->chipset != 0xa5)
538 gr_def(ctx, 0x407014 + (i<<8), 0x000000ff); 519 gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
539 else 520 else
540 gr_def(ctx, 0x407014 + (i<<8), 0x000001ff); 521 gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
541 } 522 }
542 523
543 cp_ctx(ctx, 0x407080 + (i<<8), 4); 524 cp_ctx(ctx, 0x407080 + (i<<8), 4);
544 if (dev_priv->chipset != 0xa5) 525 if (device->chipset != 0xa5)
545 gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa); 526 gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
546 else 527 else
547 gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa); 528 gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
548 if (dev_priv->chipset == 0x50) 529 if (device->chipset == 0x50)
549 gr_def(ctx, 0x407084 + (i<<8), 0x000000c0); 530 gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
550 else 531 else
551 gr_def(ctx, 0x407084 + (i<<8), 0x400000c0); 532 gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
552 gr_def(ctx, 0x407088 + (i<<8), 0xb7892080); 533 gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
553 534
554 if (dev_priv->chipset < 0xa0) 535 if (device->chipset < 0xa0)
555 cp_ctx(ctx, 0x407094 + (i<<8), 1); 536 cp_ctx(ctx, 0x407094 + (i<<8), 1);
556 else if (!IS_NVA3F(dev_priv->chipset)) 537 else if (!IS_NVA3F(device->chipset))
557 cp_ctx(ctx, 0x407094 + (i<<8), 3); 538 cp_ctx(ctx, 0x407094 + (i<<8), 3);
558 else { 539 else {
559 cp_ctx(ctx, 0x407094 + (i<<8), 4); 540 cp_ctx(ctx, 0x407094 + (i<<8), 4);
@@ -563,30 +544,30 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
563 } 544 }
564 545
565 cp_ctx(ctx, 0x407c00, 0x3); 546 cp_ctx(ctx, 0x407c00, 0x3);
566 if (dev_priv->chipset < 0x90) 547 if (device->chipset < 0x90)
567 gr_def(ctx, 0x407c00, 0x00010040); 548 gr_def(ctx, 0x407c00, 0x00010040);
568 else if (dev_priv->chipset < 0xa0) 549 else if (device->chipset < 0xa0)
569 gr_def(ctx, 0x407c00, 0x00390040); 550 gr_def(ctx, 0x407c00, 0x00390040);
570 else 551 else
571 gr_def(ctx, 0x407c00, 0x003d0040); 552 gr_def(ctx, 0x407c00, 0x003d0040);
572 gr_def(ctx, 0x407c08, 0x00000022); 553 gr_def(ctx, 0x407c08, 0x00000022);
573 if (dev_priv->chipset >= 0xa0) { 554 if (device->chipset >= 0xa0) {
574 cp_ctx(ctx, 0x407c10, 0x3); 555 cp_ctx(ctx, 0x407c10, 0x3);
575 cp_ctx(ctx, 0x407c20, 0x1); 556 cp_ctx(ctx, 0x407c20, 0x1);
576 cp_ctx(ctx, 0x407c2c, 0x1); 557 cp_ctx(ctx, 0x407c2c, 0x1);
577 } 558 }
578 559
579 if (dev_priv->chipset < 0xa0) { 560 if (device->chipset < 0xa0) {
580 cp_ctx(ctx, 0x407d00, 0x9); 561 cp_ctx(ctx, 0x407d00, 0x9);
581 } else { 562 } else {
582 cp_ctx(ctx, 0x407d00, 0x15); 563 cp_ctx(ctx, 0x407d00, 0x15);
583 } 564 }
584 if (dev_priv->chipset == 0x98) 565 if (device->chipset == 0x98)
585 gr_def(ctx, 0x407d08, 0x00380040); 566 gr_def(ctx, 0x407d08, 0x00380040);
586 else { 567 else {
587 if (dev_priv->chipset < 0x90) 568 if (device->chipset < 0x90)
588 gr_def(ctx, 0x407d08, 0x00010040); 569 gr_def(ctx, 0x407d08, 0x00010040);
589 else if (dev_priv->chipset < 0xa0) 570 else if (device->chipset < 0xa0)
590 gr_def(ctx, 0x407d08, 0x00390040); 571 gr_def(ctx, 0x407d08, 0x00390040);
591 else 572 else
592 gr_def(ctx, 0x407d08, 0x003d0040); 573 gr_def(ctx, 0x407d08, 0x003d0040);
@@ -596,11 +577,11 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
596 /* 8000+: per-TP state */ 577 /* 8000+: per-TP state */
597 for (i = 0; i < 10; i++) { 578 for (i = 0; i < 10; i++) {
598 if (units & (1<<i)) { 579 if (units & (1<<i)) {
599 if (dev_priv->chipset < 0xa0) 580 if (device->chipset < 0xa0)
600 base = 0x408000 + (i<<12); 581 base = 0x408000 + (i<<12);
601 else 582 else
602 base = 0x408000 + (i<<11); 583 base = 0x408000 + (i<<11);
603 if (dev_priv->chipset < 0xa0) 584 if (device->chipset < 0xa0)
604 offset = base + 0xc00; 585 offset = base + 0xc00;
605 else 586 else
606 offset = base + 0x80; 587 offset = base + 0x80;
@@ -609,9 +590,9 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
609 cp_ctx(ctx, offset + 0x08, 1); 590 cp_ctx(ctx, offset + 0x08, 1);
610 591
611 /* per-MP state */ 592 /* per-MP state */
612 for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) { 593 for (j = 0; j < (device->chipset < 0xa0 ? 2 : 4); j++) {
613 if (!(units & (1 << (j+24)))) continue; 594 if (!(units & (1 << (j+24)))) continue;
614 if (dev_priv->chipset < 0xa0) 595 if (device->chipset < 0xa0)
615 offset = base + 0x200 + (j<<7); 596 offset = base + 0x200 + (j<<7);
616 else 597 else
617 offset = base + 0x100 + (j<<7); 598 offset = base + 0x100 + (j<<7);
@@ -620,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
620 gr_def(ctx, offset + 0x04, 0x00160000); 601 gr_def(ctx, offset + 0x04, 0x00160000);
621 gr_def(ctx, offset + 0x08, 0x01800000); 602 gr_def(ctx, offset + 0x08, 0x01800000);
622 gr_def(ctx, offset + 0x18, 0x0003ffff); 603 gr_def(ctx, offset + 0x18, 0x0003ffff);
623 switch (dev_priv->chipset) { 604 switch (device->chipset) {
624 case 0x50: 605 case 0x50:
625 gr_def(ctx, offset + 0x1c, 0x00080000); 606 gr_def(ctx, offset + 0x1c, 0x00080000);
626 break; 607 break;
@@ -651,53 +632,53 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
651 break; 632 break;
652 } 633 }
653 gr_def(ctx, offset + 0x40, 0x00010401); 634 gr_def(ctx, offset + 0x40, 0x00010401);
654 if (dev_priv->chipset == 0x50) 635 if (device->chipset == 0x50)
655 gr_def(ctx, offset + 0x48, 0x00000040); 636 gr_def(ctx, offset + 0x48, 0x00000040);
656 else 637 else
657 gr_def(ctx, offset + 0x48, 0x00000078); 638 gr_def(ctx, offset + 0x48, 0x00000078);
658 gr_def(ctx, offset + 0x50, 0x000000bf); 639 gr_def(ctx, offset + 0x50, 0x000000bf);
659 gr_def(ctx, offset + 0x58, 0x00001210); 640 gr_def(ctx, offset + 0x58, 0x00001210);
660 if (dev_priv->chipset == 0x50) 641 if (device->chipset == 0x50)
661 gr_def(ctx, offset + 0x5c, 0x00000080); 642 gr_def(ctx, offset + 0x5c, 0x00000080);
662 else 643 else
663 gr_def(ctx, offset + 0x5c, 0x08000080); 644 gr_def(ctx, offset + 0x5c, 0x08000080);
664 if (dev_priv->chipset >= 0xa0) 645 if (device->chipset >= 0xa0)
665 gr_def(ctx, offset + 0x68, 0x0000003e); 646 gr_def(ctx, offset + 0x68, 0x0000003e);
666 } 647 }
667 648
668 if (dev_priv->chipset < 0xa0) 649 if (device->chipset < 0xa0)
669 cp_ctx(ctx, base + 0x300, 0x4); 650 cp_ctx(ctx, base + 0x300, 0x4);
670 else 651 else
671 cp_ctx(ctx, base + 0x300, 0x5); 652 cp_ctx(ctx, base + 0x300, 0x5);
672 if (dev_priv->chipset == 0x50) 653 if (device->chipset == 0x50)
673 gr_def(ctx, base + 0x304, 0x00007070); 654 gr_def(ctx, base + 0x304, 0x00007070);
674 else if (dev_priv->chipset < 0xa0) 655 else if (device->chipset < 0xa0)
675 gr_def(ctx, base + 0x304, 0x00027070); 656 gr_def(ctx, base + 0x304, 0x00027070);
676 else if (!IS_NVA3F(dev_priv->chipset)) 657 else if (!IS_NVA3F(device->chipset))
677 gr_def(ctx, base + 0x304, 0x01127070); 658 gr_def(ctx, base + 0x304, 0x01127070);
678 else 659 else
679 gr_def(ctx, base + 0x304, 0x05127070); 660 gr_def(ctx, base + 0x304, 0x05127070);
680 661
681 if (dev_priv->chipset < 0xa0) 662 if (device->chipset < 0xa0)
682 cp_ctx(ctx, base + 0x318, 1); 663 cp_ctx(ctx, base + 0x318, 1);
683 else 664 else
684 cp_ctx(ctx, base + 0x320, 1); 665 cp_ctx(ctx, base + 0x320, 1);
685 if (dev_priv->chipset == 0x50) 666 if (device->chipset == 0x50)
686 gr_def(ctx, base + 0x318, 0x0003ffff); 667 gr_def(ctx, base + 0x318, 0x0003ffff);
687 else if (dev_priv->chipset < 0xa0) 668 else if (device->chipset < 0xa0)
688 gr_def(ctx, base + 0x318, 0x03ffffff); 669 gr_def(ctx, base + 0x318, 0x03ffffff);
689 else 670 else
690 gr_def(ctx, base + 0x320, 0x07ffffff); 671 gr_def(ctx, base + 0x320, 0x07ffffff);
691 672
692 if (dev_priv->chipset < 0xa0) 673 if (device->chipset < 0xa0)
693 cp_ctx(ctx, base + 0x324, 5); 674 cp_ctx(ctx, base + 0x324, 5);
694 else 675 else
695 cp_ctx(ctx, base + 0x328, 4); 676 cp_ctx(ctx, base + 0x328, 4);
696 677
697 if (dev_priv->chipset < 0xa0) { 678 if (device->chipset < 0xa0) {
698 cp_ctx(ctx, base + 0x340, 9); 679 cp_ctx(ctx, base + 0x340, 9);
699 offset = base + 0x340; 680 offset = base + 0x340;
700 } else if (!IS_NVA3F(dev_priv->chipset)) { 681 } else if (!IS_NVA3F(device->chipset)) {
701 cp_ctx(ctx, base + 0x33c, 0xb); 682 cp_ctx(ctx, base + 0x33c, 0xb);
702 offset = base + 0x344; 683 offset = base + 0x344;
703 } else { 684 } else {
@@ -706,12 +687,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
706 } 687 }
707 gr_def(ctx, offset + 0x0, 0x00120407); 688 gr_def(ctx, offset + 0x0, 0x00120407);
708 gr_def(ctx, offset + 0x4, 0x05091507); 689 gr_def(ctx, offset + 0x4, 0x05091507);
709 if (dev_priv->chipset == 0x84) 690 if (device->chipset == 0x84)
710 gr_def(ctx, offset + 0x8, 0x05100202); 691 gr_def(ctx, offset + 0x8, 0x05100202);
711 else 692 else
712 gr_def(ctx, offset + 0x8, 0x05010202); 693 gr_def(ctx, offset + 0x8, 0x05010202);
713 gr_def(ctx, offset + 0xc, 0x00030201); 694 gr_def(ctx, offset + 0xc, 0x00030201);
714 if (dev_priv->chipset == 0xa3) 695 if (device->chipset == 0xa3)
715 cp_ctx(ctx, base + 0x36c, 1); 696 cp_ctx(ctx, base + 0x36c, 1);
716 697
717 cp_ctx(ctx, base + 0x400, 2); 698 cp_ctx(ctx, base + 0x400, 2);
@@ -720,7 +701,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
720 gr_def(ctx, base + 0x40c, 0x0d0c0b0a); 701 gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
721 gr_def(ctx, base + 0x410, 0x00141210); 702 gr_def(ctx, base + 0x410, 0x00141210);
722 703
723 if (dev_priv->chipset < 0xa0) 704 if (device->chipset < 0xa0)
724 offset = base + 0x800; 705 offset = base + 0x800;
725 else 706 else
726 offset = base + 0x500; 707 offset = base + 0x500;
@@ -728,55 +709,55 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
728 gr_def(ctx, offset + 0x0, 0x000001f0); 709 gr_def(ctx, offset + 0x0, 0x000001f0);
729 gr_def(ctx, offset + 0x4, 0x00000001); 710 gr_def(ctx, offset + 0x4, 0x00000001);
730 gr_def(ctx, offset + 0x8, 0x00000003); 711 gr_def(ctx, offset + 0x8, 0x00000003);
731 if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset)) 712 if (device->chipset == 0x50 || IS_NVAAF(device->chipset))
732 gr_def(ctx, offset + 0xc, 0x00008000); 713 gr_def(ctx, offset + 0xc, 0x00008000);
733 gr_def(ctx, offset + 0x14, 0x00039e00); 714 gr_def(ctx, offset + 0x14, 0x00039e00);
734 cp_ctx(ctx, offset + 0x1c, 2); 715 cp_ctx(ctx, offset + 0x1c, 2);
735 if (dev_priv->chipset == 0x50) 716 if (device->chipset == 0x50)
736 gr_def(ctx, offset + 0x1c, 0x00000040); 717 gr_def(ctx, offset + 0x1c, 0x00000040);
737 else 718 else
738 gr_def(ctx, offset + 0x1c, 0x00000100); 719 gr_def(ctx, offset + 0x1c, 0x00000100);
739 gr_def(ctx, offset + 0x20, 0x00003800); 720 gr_def(ctx, offset + 0x20, 0x00003800);
740 721
741 if (dev_priv->chipset >= 0xa0) { 722 if (device->chipset >= 0xa0) {
742 cp_ctx(ctx, base + 0x54c, 2); 723 cp_ctx(ctx, base + 0x54c, 2);
743 if (!IS_NVA3F(dev_priv->chipset)) 724 if (!IS_NVA3F(device->chipset))
744 gr_def(ctx, base + 0x54c, 0x003fe006); 725 gr_def(ctx, base + 0x54c, 0x003fe006);
745 else 726 else
746 gr_def(ctx, base + 0x54c, 0x003fe007); 727 gr_def(ctx, base + 0x54c, 0x003fe007);
747 gr_def(ctx, base + 0x550, 0x003fe000); 728 gr_def(ctx, base + 0x550, 0x003fe000);
748 } 729 }
749 730
750 if (dev_priv->chipset < 0xa0) 731 if (device->chipset < 0xa0)
751 offset = base + 0xa00; 732 offset = base + 0xa00;
752 else 733 else
753 offset = base + 0x680; 734 offset = base + 0x680;
754 cp_ctx(ctx, offset, 1); 735 cp_ctx(ctx, offset, 1);
755 gr_def(ctx, offset, 0x00404040); 736 gr_def(ctx, offset, 0x00404040);
756 737
757 if (dev_priv->chipset < 0xa0) 738 if (device->chipset < 0xa0)
758 offset = base + 0xe00; 739 offset = base + 0xe00;
759 else 740 else
760 offset = base + 0x700; 741 offset = base + 0x700;
761 cp_ctx(ctx, offset, 2); 742 cp_ctx(ctx, offset, 2);
762 if (dev_priv->chipset < 0xa0) 743 if (device->chipset < 0xa0)
763 gr_def(ctx, offset, 0x0077f005); 744 gr_def(ctx, offset, 0x0077f005);
764 else if (dev_priv->chipset == 0xa5) 745 else if (device->chipset == 0xa5)
765 gr_def(ctx, offset, 0x6cf7f007); 746 gr_def(ctx, offset, 0x6cf7f007);
766 else if (dev_priv->chipset == 0xa8) 747 else if (device->chipset == 0xa8)
767 gr_def(ctx, offset, 0x6cfff007); 748 gr_def(ctx, offset, 0x6cfff007);
768 else if (dev_priv->chipset == 0xac) 749 else if (device->chipset == 0xac)
769 gr_def(ctx, offset, 0x0cfff007); 750 gr_def(ctx, offset, 0x0cfff007);
770 else 751 else
771 gr_def(ctx, offset, 0x0cf7f007); 752 gr_def(ctx, offset, 0x0cf7f007);
772 if (dev_priv->chipset == 0x50) 753 if (device->chipset == 0x50)
773 gr_def(ctx, offset + 0x4, 0x00007fff); 754 gr_def(ctx, offset + 0x4, 0x00007fff);
774 else if (dev_priv->chipset < 0xa0) 755 else if (device->chipset < 0xa0)
775 gr_def(ctx, offset + 0x4, 0x003f7fff); 756 gr_def(ctx, offset + 0x4, 0x003f7fff);
776 else 757 else
777 gr_def(ctx, offset + 0x4, 0x02bf7fff); 758 gr_def(ctx, offset + 0x4, 0x02bf7fff);
778 cp_ctx(ctx, offset + 0x2c, 1); 759 cp_ctx(ctx, offset + 0x2c, 1);
779 if (dev_priv->chipset == 0x50) { 760 if (device->chipset == 0x50) {
780 cp_ctx(ctx, offset + 0x50, 9); 761 cp_ctx(ctx, offset + 0x50, 9);
781 gr_def(ctx, offset + 0x54, 0x000003ff); 762 gr_def(ctx, offset + 0x54, 0x000003ff);
782 gr_def(ctx, offset + 0x58, 0x00000003); 763 gr_def(ctx, offset + 0x58, 0x00000003);
@@ -785,7 +766,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
785 gr_def(ctx, offset + 0x64, 0x0000001f); 766 gr_def(ctx, offset + 0x64, 0x0000001f);
786 gr_def(ctx, offset + 0x68, 0x0000000f); 767 gr_def(ctx, offset + 0x68, 0x0000000f);
787 gr_def(ctx, offset + 0x6c, 0x0000000f); 768 gr_def(ctx, offset + 0x6c, 0x0000000f);
788 } else if (dev_priv->chipset < 0xa0) { 769 } else if (device->chipset < 0xa0) {
789 cp_ctx(ctx, offset + 0x50, 1); 770 cp_ctx(ctx, offset + 0x50, 1);
790 cp_ctx(ctx, offset + 0x70, 1); 771 cp_ctx(ctx, offset + 0x70, 1);
791 } else { 772 } else {
@@ -797,7 +778,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
797} 778}
798 779
799static void 780static void
800dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { 781dd_emit(struct nouveau_grctx *ctx, int num, u32 val) {
801 int i; 782 int i;
802 if (val && ctx->mode == NOUVEAU_GRCTX_VALS) 783 if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
803 for (i = 0; i < num; i++) 784 for (i = 0; i < num; i++)
@@ -808,7 +789,7 @@ dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
808static void 789static void
809nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx) 790nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
810{ 791{
811 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 792 struct nouveau_device *device = ctx->device;
812 int base, num; 793 int base, num;
813 base = ctx->ctxvals_pos; 794 base = ctx->ctxvals_pos;
814 795
@@ -822,7 +803,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
822 dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */ 803 dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */
823 dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */ 804 dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */
824 dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */ 805 dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */
825 if (dev_priv->chipset >= 0x94) 806 if (device->chipset >= 0x94)
826 dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */ 807 dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */
827 dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */ 808 dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */
828 dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */ 809 dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */
@@ -851,7 +832,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
851 dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */ 832 dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */
852 dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */ 833 dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */
853 dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */ 834 dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */
854 if (IS_NVA3F(dev_priv->chipset)) 835 if (IS_NVA3F(device->chipset))
855 dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */ 836 dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */
856 dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */ 837 dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */
857 dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */ 838 dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */
@@ -863,7 +844,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
863 dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */ 844 dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */
864 845
865 /* compat 2d state */ 846 /* compat 2d state */
866 if (dev_priv->chipset == 0x50) { 847 if (device->chipset == 0x50) {
867 dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */ 848 dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */
868 849
869 dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */ 850 dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */
@@ -923,7 +904,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
923 dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */ 904 dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */
924 905
925 /* more compat 2d state */ 906 /* more compat 2d state */
926 if (dev_priv->chipset == 0x50) { 907 if (device->chipset == 0x50) {
927 dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */ 908 dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */
928 dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */ 909 dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */
929 910
@@ -957,18 +938,18 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
957 dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */ 938 dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */
958 dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */ 939 dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */
959 dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */ 940 dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */
960 if (IS_NVA3F(dev_priv->chipset)) { 941 if (IS_NVA3F(device->chipset)) {
961 dd_emit(ctx, 1, 0); /* ffffffff */ 942 dd_emit(ctx, 1, 0); /* ffffffff */
962 dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */ 943 dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
963 } else { 944 } else {
964 dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */ 945 dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
965 } 946 }
966 dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */ 947 dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */
967 if (dev_priv->chipset != 0x50) 948 if (device->chipset != 0x50)
968 dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */ 949 dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */
969 dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */ 950 dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */
970 dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */ 951 dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */
971 if (dev_priv->chipset == 0x50) { 952 if (device->chipset == 0x50) {
972 dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */ 953 dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */
973 dd_emit(ctx, 1, 0); /* 00000001 */ 954 dd_emit(ctx, 1, 0); /* 00000001 */
974 } else { 955 } else {
@@ -994,7 +975,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
994 dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */ 975 dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
995 dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */ 976 dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */
996 dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */ 977 dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */
997 if (dev_priv->chipset != 0x50) 978 if (device->chipset != 0x50)
998 dd_emit(ctx, 3, 0); /* 1, 1, 1 */ 979 dd_emit(ctx, 3, 0); /* 1, 1, 1 */
999 else 980 else
1000 dd_emit(ctx, 2, 0); /* 1, 1 */ 981 dd_emit(ctx, 2, 0); /* 1, 1 */
@@ -1002,15 +983,15 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
1002 dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/ 983 dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/
1003 dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ 984 dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
1004 dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ 985 dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
1005 if (IS_NVA3F(dev_priv->chipset)) { 986 if (IS_NVA3F(device->chipset)) {
1006 dd_emit(ctx, 1, 3); /* 00000003 */ 987 dd_emit(ctx, 1, 3); /* 00000003 */
1007 dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */ 988 dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */
1008 } 989 }
1009 if (dev_priv->chipset != 0x50) 990 if (device->chipset != 0x50)
1010 dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */ 991 dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */
1011 dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */ 992 dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */
1012 dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */ 993 dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */
1013 if (dev_priv->chipset != 0x50) 994 if (device->chipset != 0x50)
1014 dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */ 995 dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
1015 dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */ 996 dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */
1016 dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */ 997 dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
@@ -1022,16 +1003,16 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
1022 dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ 1003 dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
1023 dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */ 1004 dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
1024 dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ 1005 dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
1025 if (dev_priv->chipset >= 0xa0) 1006 if (device->chipset >= 0xa0)
1026 dd_emit(ctx, 1, 0); /* ffffffff */ 1007 dd_emit(ctx, 1, 0); /* ffffffff */
1027 dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */ 1008 dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
1028 dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */ 1009 dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */
1029 dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ 1010 dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
1030 dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ 1011 dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
1031 dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/ 1012 dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/
1032 if (dev_priv->chipset != 0x50) 1013 if (device->chipset != 0x50)
1033 dd_emit(ctx, 8, 0); /* 00000001 */ 1014 dd_emit(ctx, 8, 0); /* 00000001 */
1034 if (dev_priv->chipset >= 0xa0) { 1015 if (device->chipset >= 0xa0) {
1035 dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */ 1016 dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */
1036 dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */ 1017 dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */
1037 dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */ 1018 dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */
@@ -1042,20 +1023,20 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
1042 dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ 1023 dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
1043 dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */ 1024 dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */
1044 dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */ 1025 dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */
1045 if (IS_NVA3F(dev_priv->chipset)) 1026 if (IS_NVA3F(device->chipset))
1046 dd_emit(ctx, 1, 0); /* 00000001 */ 1027 dd_emit(ctx, 1, 0); /* 00000001 */
1047 dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */ 1028 dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */
1048 if (dev_priv->chipset >= 0xa0) 1029 if (device->chipset >= 0xa0)
1049 dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */ 1030 dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
1050 dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */ 1031 dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
1051 if (dev_priv->chipset >= 0xa0) 1032 if (device->chipset >= 0xa0)
1052 dd_emit(ctx, 1, 0); /* 00000003 */ 1033 dd_emit(ctx, 1, 0); /* 00000003 */
1053 dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */ 1034 dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */
1054 dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */ 1035 dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */
1055 dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */ 1036 dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */
1056 dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */ 1037 dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */
1057 dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */ 1038 dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */
1058 if (dev_priv->chipset != 0x50) { 1039 if (device->chipset != 0x50) {
1059 dd_emit(ctx, 1, 0xe00); /* 7fff */ 1040 dd_emit(ctx, 1, 0xe00); /* 7fff */
1060 dd_emit(ctx, 1, 0x1000); /* 7fff */ 1041 dd_emit(ctx, 1, 0x1000); /* 7fff */
1061 dd_emit(ctx, 1, 0x1e00); /* 7fff */ 1042 dd_emit(ctx, 1, 0x1e00); /* 7fff */
@@ -1070,10 +1051,10 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
1070 dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */ 1051 dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
1071 dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */ 1052 dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
1072 dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */ 1053 dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
1073 if (IS_NVA3F(dev_priv->chipset)) 1054 if (IS_NVA3F(device->chipset))
1074 dd_emit(ctx, 1, 0x200); 1055 dd_emit(ctx, 1, 0x200);
1075 dd_emit(ctx, 1, 0); /* 00000001 */ 1056 dd_emit(ctx, 1, 0); /* 00000001 */
1076 if (dev_priv->chipset < 0xa0) { 1057 if (device->chipset < 0xa0) {
1077 dd_emit(ctx, 1, 1); /* 00000001 */ 1058 dd_emit(ctx, 1, 1); /* 00000001 */
1078 dd_emit(ctx, 1, 0x70); /* 000000ff */ 1059 dd_emit(ctx, 1, 0x70); /* 000000ff */
1079 dd_emit(ctx, 1, 0x80); /* 000000ff */ 1060 dd_emit(ctx, 1, 0x80); /* 000000ff */
@@ -1120,7 +1101,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
1120 1101
1121 num = ctx->ctxvals_pos - base; 1102 num = ctx->ctxvals_pos - base;
1122 ctx->ctxvals_pos = base; 1103 ctx->ctxvals_pos = base;
1123 if (IS_NVA3F(dev_priv->chipset)) 1104 if (IS_NVA3F(device->chipset))
1124 cp_ctx(ctx, 0x404800, num); 1105 cp_ctx(ctx, 0x404800, num);
1125 else 1106 else
1126 cp_ctx(ctx, 0x405400, num); 1107 cp_ctx(ctx, 0x405400, num);
@@ -1169,7 +1150,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
1169 */ 1150 */
1170 1151
1171static void 1152static void
1172xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { 1153xf_emit(struct nouveau_grctx *ctx, int num, u32 val) {
1173 int i; 1154 int i;
1174 if (val && ctx->mode == NOUVEAU_GRCTX_VALS) 1155 if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
1175 for (i = 0; i < num; i++) 1156 for (i = 0; i < num; i++)
@@ -1201,16 +1182,16 @@ static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
1201static void 1182static void
1202nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) 1183nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1203{ 1184{
1204 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1185 struct nouveau_device *device = ctx->device;
1205 int i; 1186 int i;
1206 int offset; 1187 int offset;
1207 int size = 0; 1188 int size = 0;
1208 uint32_t units = nv_rd32 (ctx->dev, 0x1540); 1189 u32 units = nv_rd32 (ctx->device, 0x1540);
1209 1190
1210 offset = (ctx->ctxvals_pos+0x3f)&~0x3f; 1191 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
1211 ctx->ctxvals_base = offset; 1192 ctx->ctxvals_base = offset;
1212 1193
1213 if (dev_priv->chipset < 0xa0) { 1194 if (device->chipset < 0xa0) {
1214 /* Strand 0 */ 1195 /* Strand 0 */
1215 ctx->ctxvals_pos = offset; 1196 ctx->ctxvals_pos = offset;
1216 nv50_graph_construct_gene_dispatch(ctx); 1197 nv50_graph_construct_gene_dispatch(ctx);
@@ -1280,7 +1261,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1280 1261
1281 /* Strand 2 */ 1262 /* Strand 2 */
1282 ctx->ctxvals_pos = offset + 2; 1263 ctx->ctxvals_pos = offset + 2;
1283 if (dev_priv->chipset == 0xa0) 1264 if (device->chipset == 0xa0)
1284 nv50_graph_construct_gene_unk14xx(ctx); 1265 nv50_graph_construct_gene_unk14xx(ctx);
1285 nv50_graph_construct_gene_unk24xx(ctx); 1266 nv50_graph_construct_gene_unk24xx(ctx);
1286 if ((ctx->ctxvals_pos-offset)/8 > size) 1267 if ((ctx->ctxvals_pos-offset)/8 > size)
@@ -1327,7 +1308,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1327 1308
1328 /* Strand 7 */ 1309 /* Strand 7 */
1329 ctx->ctxvals_pos = offset + 7; 1310 ctx->ctxvals_pos = offset + 7;
1330 if (dev_priv->chipset == 0xa0) { 1311 if (device->chipset == 0xa0) {
1331 if (units & (1 << 4)) 1312 if (units & (1 << 4))
1332 nv50_graph_construct_xfer_tp(ctx); 1313 nv50_graph_construct_xfer_tp(ctx);
1333 if (units & (1 << 5)) 1314 if (units & (1 << 5))
@@ -1365,24 +1346,24 @@ static void
1365nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx) 1346nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
1366{ 1347{
1367 /* start of strand 0 */ 1348 /* start of strand 0 */
1368 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1349 struct nouveau_device *device = ctx->device;
1369 /* SEEK */ 1350 /* SEEK */
1370 if (dev_priv->chipset == 0x50) 1351 if (device->chipset == 0x50)
1371 xf_emit(ctx, 5, 0); 1352 xf_emit(ctx, 5, 0);
1372 else if (!IS_NVA3F(dev_priv->chipset)) 1353 else if (!IS_NVA3F(device->chipset))
1373 xf_emit(ctx, 6, 0); 1354 xf_emit(ctx, 6, 0);
1374 else 1355 else
1375 xf_emit(ctx, 4, 0); 1356 xf_emit(ctx, 4, 0);
1376 /* SEEK */ 1357 /* SEEK */
1377 /* the PGRAPH's internal FIFO */ 1358 /* the PGRAPH's internal FIFO */
1378 if (dev_priv->chipset == 0x50) 1359 if (device->chipset == 0x50)
1379 xf_emit(ctx, 8*3, 0); 1360 xf_emit(ctx, 8*3, 0);
1380 else 1361 else
1381 xf_emit(ctx, 0x100*3, 0); 1362 xf_emit(ctx, 0x100*3, 0);
1382 /* and another bonus slot?!? */ 1363 /* and another bonus slot?!? */
1383 xf_emit(ctx, 3, 0); 1364 xf_emit(ctx, 3, 0);
1384 /* and YET ANOTHER bonus slot? */ 1365 /* and YET ANOTHER bonus slot? */
1385 if (IS_NVA3F(dev_priv->chipset)) 1366 if (IS_NVA3F(device->chipset))
1386 xf_emit(ctx, 3, 0); 1367 xf_emit(ctx, 3, 0);
1387 /* SEEK */ 1368 /* SEEK */
1388 /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */ 1369 /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
@@ -1394,7 +1375,7 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
1394 /* SEEK */ 1375 /* SEEK */
1395 xf_emit(ctx, 9, 0); 1376 xf_emit(ctx, 9, 0);
1396 /* SEEK */ 1377 /* SEEK */
1397 if (dev_priv->chipset < 0x90) 1378 if (device->chipset < 0x90)
1398 xf_emit(ctx, 4, 0); 1379 xf_emit(ctx, 4, 0);
1399 /* SEEK */ 1380 /* SEEK */
1400 xf_emit(ctx, 2, 0); 1381 xf_emit(ctx, 2, 0);
@@ -1407,9 +1388,9 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
1407 xf_emit(ctx, 6*2, 0); 1388 xf_emit(ctx, 6*2, 0);
1408 xf_emit(ctx, 2, 0); 1389 xf_emit(ctx, 2, 0);
1409 /* SEEK */ 1390 /* SEEK */
1410 if (dev_priv->chipset == 0x50) 1391 if (device->chipset == 0x50)
1411 xf_emit(ctx, 0x1c, 0); 1392 xf_emit(ctx, 0x1c, 0);
1412 else if (dev_priv->chipset < 0xa0) 1393 else if (device->chipset < 0xa0)
1413 xf_emit(ctx, 0x1e, 0); 1394 xf_emit(ctx, 0x1e, 0);
1414 else 1395 else
1415 xf_emit(ctx, 0x22, 0); 1396 xf_emit(ctx, 0x22, 0);
@@ -1421,9 +1402,9 @@ static void
1421nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx) 1402nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
1422{ 1403{
1423 /* Strand 0, right after dispatch */ 1404 /* Strand 0, right after dispatch */
1424 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1405 struct nouveau_device *device = ctx->device;
1425 int smallm2mf = 0; 1406 int smallm2mf = 0;
1426 if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98) 1407 if (device->chipset < 0x92 || device->chipset == 0x98)
1427 smallm2mf = 1; 1408 smallm2mf = 1;
1428 /* SEEK */ 1409 /* SEEK */
1429 xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */ 1410 xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
@@ -1472,10 +1453,10 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
1472static void 1453static void
1473nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx) 1454nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
1474{ 1455{
1475 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1456 struct nouveau_device *device = ctx->device;
1476 xf_emit(ctx, 2, 0); /* RO */ 1457 xf_emit(ctx, 2, 0); /* RO */
1477 xf_emit(ctx, 0x800, 0); /* ffffffff */ 1458 xf_emit(ctx, 0x800, 0); /* ffffffff */
1478 switch (dev_priv->chipset) { 1459 switch (device->chipset) {
1479 case 0x50: 1460 case 0x50:
1480 case 0x92: 1461 case 0x92:
1481 case 0xa0: 1462 case 0xa0:
@@ -1540,7 +1521,7 @@ nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
1540static void 1521static void
1541nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx) 1522nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
1542{ 1523{
1543 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1524 struct nouveau_device *device = ctx->device;
1544 int i; 1525 int i;
1545 /* end of area 2 on pre-NVA0, area 1 on NVAx */ 1526 /* end of area 2 on pre-NVA0, area 1 on NVAx */
1546 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ 1527 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
@@ -1550,14 +1531,14 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
1550 xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ 1531 xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
1551 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ 1532 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
1552 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ 1533 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
1553 if (dev_priv->chipset == 0x50) 1534 if (device->chipset == 0x50)
1554 xf_emit(ctx, 1, 0x3ff); 1535 xf_emit(ctx, 1, 0x3ff);
1555 else 1536 else
1556 xf_emit(ctx, 1, 0x7ff); /* 000007ff */ 1537 xf_emit(ctx, 1, 0x7ff); /* 000007ff */
1557 xf_emit(ctx, 1, 0); /* 111/113 */ 1538 xf_emit(ctx, 1, 0); /* 111/113 */
1558 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 1539 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1559 for (i = 0; i < 8; i++) { 1540 for (i = 0; i < 8; i++) {
1560 switch (dev_priv->chipset) { 1541 switch (device->chipset) {
1561 case 0x50: 1542 case 0x50:
1562 case 0x86: 1543 case 0x86:
1563 case 0x98: 1544 case 0x98:
@@ -1600,7 +1581,7 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
1600static void 1581static void
1601nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx) 1582nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
1602{ 1583{
1603 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1584 struct nouveau_device *device = ctx->device;
1604 /* end of area 2 on pre-NVA0, area 1 on NVAx */ 1585 /* end of area 2 on pre-NVA0, area 1 on NVAx */
1605 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ 1586 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
1606 xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */ 1587 xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */
@@ -1614,9 +1595,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
1614 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ 1595 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
1615 xf_emit(ctx, 1, 0); /* 00000007 */ 1596 xf_emit(ctx, 1, 0); /* 00000007 */
1616 xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */ 1597 xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */
1617 if (dev_priv->chipset >= 0xa0) 1598 if (device->chipset >= 0xa0)
1618 xf_emit(ctx, 1, 0x0fac6881); 1599 xf_emit(ctx, 1, 0x0fac6881);
1619 if (IS_NVA3F(dev_priv->chipset)) { 1600 if (IS_NVA3F(device->chipset)) {
1620 xf_emit(ctx, 1, 1); 1601 xf_emit(ctx, 1, 1);
1621 xf_emit(ctx, 3, 0); 1602 xf_emit(ctx, 3, 0);
1622 } 1603 }
@@ -1625,9 +1606,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
1625static void 1606static void
1626nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx) 1607nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
1627{ 1608{
1628 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1609 struct nouveau_device *device = ctx->device;
1629 /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */ 1610 /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
1630 if (dev_priv->chipset != 0x50) { 1611 if (device->chipset != 0x50) {
1631 xf_emit(ctx, 5, 0); /* ffffffff */ 1612 xf_emit(ctx, 5, 0); /* ffffffff */
1632 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ 1613 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
1633 xf_emit(ctx, 1, 0); /* 00000001 */ 1614 xf_emit(ctx, 1, 0); /* 00000001 */
@@ -1643,14 +1624,14 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
1643 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 1624 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1644 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ 1625 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
1645 xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */ 1626 xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */
1646 if (dev_priv->chipset != 0x50) 1627 if (device->chipset != 0x50)
1647 xf_emit(ctx, 1, 0); /* 3ff */ 1628 xf_emit(ctx, 1, 0); /* 3ff */
1648 xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */ 1629 xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */
1649 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */ 1630 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
1650 xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */ 1631 xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
1651 xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ 1632 xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
1652 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ 1633 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
1653 if (dev_priv->chipset != 0x50) 1634 if (device->chipset != 0x50)
1654 xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */ 1635 xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */
1655 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 1636 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1656 xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */ 1637 xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */
@@ -1669,7 +1650,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
1669 xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */ 1650 xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
1670 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ 1651 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
1671 xf_emit(ctx, 1, 0); /* 0000000f */ 1652 xf_emit(ctx, 1, 0); /* 0000000f */
1672 if (dev_priv->chipset == 0x50) 1653 if (device->chipset == 0x50)
1673 xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ 1654 xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
1674 else 1655 else
1675 xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ 1656 xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
@@ -1704,11 +1685,11 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
1704 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ 1685 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
1705 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ 1686 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
1706 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ 1687 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
1707 if (IS_NVA3F(dev_priv->chipset)) 1688 if (IS_NVA3F(device->chipset))
1708 xf_emit(ctx, 1, 0); /* 00000001 */ 1689 xf_emit(ctx, 1, 0); /* 00000001 */
1709 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ 1690 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
1710 xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ 1691 xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
1711 if (dev_priv->chipset != 0x50) { 1692 if (device->chipset != 0x50) {
1712 xf_emit(ctx, 1, 0); /* ffffffff */ 1693 xf_emit(ctx, 1, 0); /* ffffffff */
1713 xf_emit(ctx, 1, 0); /* 00000001 */ 1694 xf_emit(ctx, 1, 0); /* 00000001 */
1714 xf_emit(ctx, 1, 0); /* 000003ff */ 1695 xf_emit(ctx, 1, 0); /* 000003ff */
@@ -1736,7 +1717,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
1736static void 1717static void
1737nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx) 1718nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
1738{ 1719{
1739 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1720 struct nouveau_device *device = ctx->device;
1740 /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */ 1721 /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
1741 /* SEEK */ 1722 /* SEEK */
1742 xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ 1723 xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
@@ -1774,7 +1755,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
1774 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ 1755 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
1775 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ 1756 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
1776 xf_emit(ctx, 1, 0); /* 00000007 */ 1757 xf_emit(ctx, 1, 0); /* 00000007 */
1777 if (dev_priv->chipset != 0x50) 1758 if (device->chipset != 0x50)
1778 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */ 1759 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */
1779 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ 1760 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
1780 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ 1761 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
@@ -1789,7 +1770,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
1789 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ 1770 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
1790 xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */ 1771 xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */
1791 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */ 1772 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */
1792 if (dev_priv->chipset != 0x50) 1773 if (device->chipset != 0x50)
1793 xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */ 1774 xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */
1794 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */ 1775 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */
1795} 1776}
@@ -1817,7 +1798,7 @@ nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
1817static void 1798static void
1818nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx) 1799nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
1819{ 1800{
1820 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1801 struct nouveau_device *device = ctx->device;
1821 int i; 1802 int i;
1822 /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */ 1803 /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
1823 /* SEEK */ 1804 /* SEEK */
@@ -1829,7 +1810,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
1829 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ 1810 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
1830 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ 1811 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
1831 /* SEEK */ 1812 /* SEEK */
1832 if (IS_NVA3F(dev_priv->chipset)) { 1813 if (IS_NVA3F(device->chipset)) {
1833 xf_emit(ctx, 4, 0); /* RO */ 1814 xf_emit(ctx, 4, 0); /* RO */
1834 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ 1815 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
1835 xf_emit(ctx, 1, 0); /* 1ff */ 1816 xf_emit(ctx, 1, 0); /* 1ff */
@@ -1860,7 +1841,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
1860 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ 1841 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
1861 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ 1842 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
1862 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ 1843 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
1863 if (dev_priv->chipset != 0x50) 1844 if (device->chipset != 0x50)
1864 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ 1845 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
1865 /* SEEK */ 1846 /* SEEK */
1866 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 1847 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
@@ -1869,7 +1850,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
1869 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ 1850 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
1870 xf_emit(ctx, 1, 1); /* 00000001 */ 1851 xf_emit(ctx, 1, 1); /* 00000001 */
1871 /* SEEK */ 1852 /* SEEK */
1872 if (dev_priv->chipset >= 0xa0) 1853 if (device->chipset >= 0xa0)
1873 xf_emit(ctx, 2, 4); /* 000000ff */ 1854 xf_emit(ctx, 2, 4); /* 000000ff */
1874 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ 1855 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
1875 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ 1856 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
@@ -1893,20 +1874,20 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
1893 xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */ 1874 xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */
1894 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ 1875 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
1895 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ 1876 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
1896 if (dev_priv->chipset != 0x50) 1877 if (device->chipset != 0x50)
1897 xf_emit(ctx, 1, 0); /* 000003ff */ 1878 xf_emit(ctx, 1, 0); /* 000003ff */
1898} 1879}
1899 1880
1900static void 1881static void
1901nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx) 1882nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
1902{ 1883{
1903 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1884 struct nouveau_device *device = ctx->device;
1904 int acnt = 0x10, rep, i; 1885 int acnt = 0x10, rep, i;
1905 /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */ 1886 /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
1906 if (IS_NVA3F(dev_priv->chipset)) 1887 if (IS_NVA3F(device->chipset))
1907 acnt = 0x20; 1888 acnt = 0x20;
1908 /* SEEK */ 1889 /* SEEK */
1909 if (dev_priv->chipset >= 0xa0) { 1890 if (device->chipset >= 0xa0) {
1910 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */ 1891 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */
1911 xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */ 1892 xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */
1912 } 1893 }
@@ -1923,9 +1904,9 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
1923 xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */ 1904 xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */
1924 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 1905 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1925 /* SEEK */ 1906 /* SEEK */
1926 if (IS_NVA3F(dev_priv->chipset)) 1907 if (IS_NVA3F(device->chipset))
1927 xf_emit(ctx, 0xb, 0); /* RO */ 1908 xf_emit(ctx, 0xb, 0); /* RO */
1928 else if (dev_priv->chipset >= 0xa0) 1909 else if (device->chipset >= 0xa0)
1929 xf_emit(ctx, 0x9, 0); /* RO */ 1910 xf_emit(ctx, 0x9, 0); /* RO */
1930 else 1911 else
1931 xf_emit(ctx, 0x8, 0); /* RO */ 1912 xf_emit(ctx, 0x8, 0); /* RO */
@@ -1944,11 +1925,11 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
1944 xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */ 1925 xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */
1945 xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */ 1926 xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */
1946 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 1927 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1947 if (dev_priv->chipset == 0x50) 1928 if (device->chipset == 0x50)
1948 xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */ 1929 xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */
1949 else 1930 else
1950 xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */ 1931 xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */
1951 if (dev_priv->chipset == 0xa8) 1932 if (device->chipset == 0xa8)
1952 xf_emit(ctx, 1, 0x1e00); /* 7fff */ 1933 xf_emit(ctx, 1, 0x1e00); /* 7fff */
1953 /* SEEK */ 1934 /* SEEK */
1954 xf_emit(ctx, 0xc, 0); /* RO or close */ 1935 xf_emit(ctx, 0xc, 0); /* RO or close */
@@ -1956,13 +1937,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
1956 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ 1937 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
1957 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ 1938 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
1958 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ 1939 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
1959 if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0) 1940 if (device->chipset > 0x50 && device->chipset < 0xa0)
1960 xf_emit(ctx, 2, 0); /* ffffffff */ 1941 xf_emit(ctx, 2, 0); /* ffffffff */
1961 else 1942 else
1962 xf_emit(ctx, 1, 0); /* ffffffff */ 1943 xf_emit(ctx, 1, 0); /* ffffffff */
1963 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */ 1944 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */
1964 /* SEEK */ 1945 /* SEEK */
1965 if (IS_NVA3F(dev_priv->chipset)) { 1946 if (IS_NVA3F(device->chipset)) {
1966 xf_emit(ctx, 0x10, 0); /* 0? */ 1947 xf_emit(ctx, 0x10, 0); /* 0? */
1967 xf_emit(ctx, 2, 0); /* weird... */ 1948 xf_emit(ctx, 2, 0); /* weird... */
1968 xf_emit(ctx, 2, 0); /* RO */ 1949 xf_emit(ctx, 2, 0); /* RO */
@@ -1975,7 +1956,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
1975 xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */ 1956 xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */
1976 xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */ 1957 xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */
1977 xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */ 1958 xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */
1978 if (dev_priv->chipset >= 0xa0) 1959 if (device->chipset >= 0xa0)
1979 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */ 1960 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */
1980 /* SEEK */ 1961 /* SEEK */
1981 xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ 1962 xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
@@ -2013,23 +1994,23 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
2013 xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */ 1994 xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */
2014 xf_emit(ctx, 3, 0); /* f/1f */ 1995 xf_emit(ctx, 3, 0); /* f/1f */
2015 /* SEEK */ 1996 /* SEEK */
2016 if (IS_NVA3F(dev_priv->chipset)) { 1997 if (IS_NVA3F(device->chipset)) {
2017 xf_emit(ctx, acnt, 0); /* f */ 1998 xf_emit(ctx, acnt, 0); /* f */
2018 xf_emit(ctx, 3, 0); /* f/1f */ 1999 xf_emit(ctx, 3, 0); /* f/1f */
2019 } 2000 }
2020 /* SEEK */ 2001 /* SEEK */
2021 if (IS_NVA3F(dev_priv->chipset)) 2002 if (IS_NVA3F(device->chipset))
2022 xf_emit(ctx, 2, 0); /* RO */ 2003 xf_emit(ctx, 2, 0); /* RO */
2023 else 2004 else
2024 xf_emit(ctx, 5, 0); /* RO */ 2005 xf_emit(ctx, 5, 0); /* RO */
2025 /* SEEK */ 2006 /* SEEK */
2026 xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */ 2007 xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */
2027 /* SEEK */ 2008 /* SEEK */
2028 if (dev_priv->chipset < 0xa0) { 2009 if (device->chipset < 0xa0) {
2029 xf_emit(ctx, 0x41, 0); /* RO */ 2010 xf_emit(ctx, 0x41, 0); /* RO */
2030 /* SEEK */ 2011 /* SEEK */
2031 xf_emit(ctx, 0x11, 0); /* RO */ 2012 xf_emit(ctx, 0x11, 0); /* RO */
2032 } else if (!IS_NVA3F(dev_priv->chipset)) 2013 } else if (!IS_NVA3F(device->chipset))
2033 xf_emit(ctx, 0x50, 0); /* RO */ 2014 xf_emit(ctx, 0x50, 0); /* RO */
2034 else 2015 else
2035 xf_emit(ctx, 0x58, 0); /* RO */ 2016 xf_emit(ctx, 0x58, 0); /* RO */
@@ -2041,7 +2022,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
2041 xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */ 2022 xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */
2042 xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */ 2023 xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */
2043 /* SEEK */ 2024 /* SEEK */
2044 if (IS_NVA3F(dev_priv->chipset)) 2025 if (IS_NVA3F(device->chipset))
2045 xf_emit(ctx, 0x1d, 0); /* RO */ 2026 xf_emit(ctx, 0x1d, 0); /* RO */
2046 else 2027 else
2047 xf_emit(ctx, 0x16, 0); /* RO */ 2028 xf_emit(ctx, 0x16, 0); /* RO */
@@ -2049,21 +2030,21 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
2049 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ 2030 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
2050 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ 2031 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
2051 /* SEEK */ 2032 /* SEEK */
2052 if (dev_priv->chipset < 0xa0) 2033 if (device->chipset < 0xa0)
2053 xf_emit(ctx, 8, 0); /* RO */ 2034 xf_emit(ctx, 8, 0); /* RO */
2054 else if (IS_NVA3F(dev_priv->chipset)) 2035 else if (IS_NVA3F(device->chipset))
2055 xf_emit(ctx, 0xc, 0); /* RO */ 2036 xf_emit(ctx, 0xc, 0); /* RO */
2056 else 2037 else
2057 xf_emit(ctx, 7, 0); /* RO */ 2038 xf_emit(ctx, 7, 0); /* RO */
2058 /* SEEK */ 2039 /* SEEK */
2059 xf_emit(ctx, 0xa, 0); /* RO */ 2040 xf_emit(ctx, 0xa, 0); /* RO */
2060 if (dev_priv->chipset == 0xa0) 2041 if (device->chipset == 0xa0)
2061 rep = 0xc; 2042 rep = 0xc;
2062 else 2043 else
2063 rep = 4; 2044 rep = 4;
2064 for (i = 0; i < rep; i++) { 2045 for (i = 0; i < rep; i++) {
2065 /* SEEK */ 2046 /* SEEK */
2066 if (IS_NVA3F(dev_priv->chipset)) 2047 if (IS_NVA3F(device->chipset))
2067 xf_emit(ctx, 0x20, 0); /* ffffffff */ 2048 xf_emit(ctx, 0x20, 0); /* ffffffff */
2068 xf_emit(ctx, 0x200, 0); /* ffffffff */ 2049 xf_emit(ctx, 0x200, 0); /* ffffffff */
2069 xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */ 2050 xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */
@@ -2077,7 +2058,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
2077 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ 2058 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
2078 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 2059 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2079 /* SEEK */ 2060 /* SEEK */
2080 if (IS_NVA3F(dev_priv->chipset)) 2061 if (IS_NVA3F(device->chipset))
2081 xf_emit(ctx, 7, 0); /* weird... */ 2062 xf_emit(ctx, 7, 0); /* weird... */
2082 else 2063 else
2083 xf_emit(ctx, 5, 0); /* weird... */ 2064 xf_emit(ctx, 5, 0); /* weird... */
@@ -2086,13 +2067,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
2086static void 2067static void
2087nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx) 2068nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
2088{ 2069{
2089 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2070 struct nouveau_device *device = ctx->device;
2090 /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */ 2071 /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
2091 /* SEEK */ 2072 /* SEEK */
2092 xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */ 2073 xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
2093 xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */ 2074 xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */
2094 xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */ 2075 xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */
2095 if (dev_priv->chipset < 0xa0) { 2076 if (device->chipset < 0xa0) {
2096 /* this is useless on everything but the original NV50, 2077 /* this is useless on everything but the original NV50,
2097 * guess they forgot to nuke it. Or just didn't bother. */ 2078 * guess they forgot to nuke it. Or just didn't bother. */
2098 xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */ 2079 xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */
@@ -2148,7 +2129,7 @@ nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
2148static void 2129static void
2149nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx) 2130nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
2150{ 2131{
2151 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2132 struct nouveau_device *device = ctx->device;
2152 /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */ 2133 /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
2153 /* SEEK */ 2134 /* SEEK */
2154 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */ 2135 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
@@ -2173,7 +2154,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
2173 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ 2154 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
2174 /* SEEK */ 2155 /* SEEK */
2175 xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */ 2156 xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */
2176 switch (dev_priv->chipset) { 2157 switch (device->chipset) {
2177 case 0x50: 2158 case 0x50:
2178 case 0x92: 2159 case 0x92:
2179 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ 2160 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
@@ -2247,7 +2228,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
2247static void 2228static void
2248nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx) 2229nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
2249{ 2230{
2250 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2231 struct nouveau_device *device = ctx->device;
2251 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ 2232 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
2252 xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */ 2233 xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
2253 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ 2234 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
@@ -2277,9 +2258,9 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
2277 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ 2258 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
2278 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ 2259 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
2279 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ 2260 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
2280 if (IS_NVA3F(dev_priv->chipset)) 2261 if (IS_NVA3F(device->chipset))
2281 xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */ 2262 xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */
2282 else if (dev_priv->chipset >= 0xa0) 2263 else if (device->chipset >= 0xa0)
2283 xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */ 2264 xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */
2284 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ 2265 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
2285 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ 2266 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
@@ -2293,11 +2274,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
2293 xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */ 2274 xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */
2294 xf_emit(ctx, 1, 0); /* 00000001 */ 2275 xf_emit(ctx, 1, 0); /* 00000001 */
2295 xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */ 2276 xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */
2296 if (dev_priv->chipset != 0x50) { 2277 if (device->chipset != 0x50) {
2297 xf_emit(ctx, 1, 0); /* 3ff */ 2278 xf_emit(ctx, 1, 0); /* 3ff */
2298 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */ 2279 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */
2299 } 2280 }
2300 if (IS_NVA3F(dev_priv->chipset)) 2281 if (IS_NVA3F(device->chipset))
2301 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */ 2282 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
2302 xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ 2283 xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
2303 xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ 2284 xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
@@ -2316,11 +2297,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
2316 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ 2297 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2317 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ 2298 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
2318 xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ 2299 xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
2319 if (dev_priv->chipset != 0x50) 2300 if (device->chipset != 0x50)
2320 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ 2301 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
2321 if (dev_priv->chipset < 0xa0) 2302 if (device->chipset < 0xa0)
2322 xf_emit(ctx, 0x1c, 0); /* RO */ 2303 xf_emit(ctx, 0x1c, 0); /* RO */
2323 else if (IS_NVA3F(dev_priv->chipset)) 2304 else if (IS_NVA3F(device->chipset))
2324 xf_emit(ctx, 0x9, 0); 2305 xf_emit(ctx, 0x9, 0);
2325 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ 2306 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
2326 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ 2307 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
@@ -2328,13 +2309,13 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
2328 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ 2309 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
2329 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ 2310 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
2330 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ 2311 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
2331 if (dev_priv->chipset != 0x50) { 2312 if (device->chipset != 0x50) {
2332 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ 2313 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
2333 xf_emit(ctx, 1, 0); /* 3ff */ 2314 xf_emit(ctx, 1, 0); /* 3ff */
2334 } 2315 }
2335 /* XXX: the following block could belong either to unk1cxx, or 2316 /* XXX: the following block could belong either to unk1cxx, or
2336 * to STRMOUT. Rather hard to tell. */ 2317 * to STRMOUT. Rather hard to tell. */
2337 if (dev_priv->chipset < 0xa0) 2318 if (device->chipset < 0xa0)
2338 xf_emit(ctx, 0x25, 0); 2319 xf_emit(ctx, 0x25, 0);
2339 else 2320 else
2340 xf_emit(ctx, 0x3b, 0); 2321 xf_emit(ctx, 0x3b, 0);
@@ -2343,18 +2324,18 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
2343static void 2324static void
2344nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx) 2325nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
2345{ 2326{
2346 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2327 struct nouveau_device *device = ctx->device;
2347 xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ 2328 xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
2348 xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ 2329 xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
2349 xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ 2330 xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
2350 if (dev_priv->chipset >= 0xa0) { 2331 if (device->chipset >= 0xa0) {
2351 xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ 2332 xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
2352 xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ 2333 xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
2353 } 2334 }
2354 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ 2335 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
2355 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ 2336 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
2356 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 2337 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2357 if (dev_priv->chipset == 0x50) 2338 if (device->chipset == 0x50)
2358 xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ 2339 xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
2359 else 2340 else
2360 xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ 2341 xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
@@ -2365,7 +2346,7 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
2365 xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */ 2346 xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */
2366 xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */ 2347 xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */
2367 xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ 2348 xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
2368 if (dev_priv->chipset >= 0xa0) { 2349 if (device->chipset >= 0xa0) {
2369 xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ 2350 xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
2370 xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ 2351 xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
2371 } 2352 }
@@ -2385,12 +2366,12 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
2385static void 2366static void
2386nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx) 2367nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
2387{ 2368{
2388 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2369 struct nouveau_device *device = ctx->device;
2389 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */ 2370 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
2390 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */ 2371 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
2391 xf_emit(ctx, 1, 0); /* 00000007 */ 2372 xf_emit(ctx, 1, 0); /* 00000007 */
2392 xf_emit(ctx, 1, 0); /* 000003ff */ 2373 xf_emit(ctx, 1, 0); /* 000003ff */
2393 if (IS_NVA3F(dev_priv->chipset)) 2374 if (IS_NVA3F(device->chipset))
2394 xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */ 2375 xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
2395 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2376 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2396} 2377}
@@ -2398,7 +2379,7 @@ nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
2398static void 2379static void
2399nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx) 2380nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
2400{ 2381{
2401 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2382 struct nouveau_device *device = ctx->device;
2402 /* SEEK */ 2383 /* SEEK */
2403 xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ 2384 xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
2404 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ 2385 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
@@ -2416,7 +2397,7 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
2416 xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */ 2397 xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */
2417 xf_emit(ctx, 1, 0); /* ff/3ff */ 2398 xf_emit(ctx, 1, 0); /* ff/3ff */
2418 xf_emit(ctx, 1, 0); /* 00000007 */ 2399 xf_emit(ctx, 1, 0); /* 00000007 */
2419 if (IS_NVA3F(dev_priv->chipset)) 2400 if (IS_NVA3F(device->chipset))
2420 xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */ 2401 xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
2421 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2402 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2422} 2403}
@@ -2424,11 +2405,11 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
2424static void 2405static void
2425nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx) 2406nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2426{ 2407{
2427 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2408 struct nouveau_device *device = ctx->device;
2428 int magic2; 2409 int magic2;
2429 if (dev_priv->chipset == 0x50) { 2410 if (device->chipset == 0x50) {
2430 magic2 = 0x00003e60; 2411 magic2 = 0x00003e60;
2431 } else if (!IS_NVA3F(dev_priv->chipset)) { 2412 } else if (!IS_NVA3F(device->chipset)) {
2432 magic2 = 0x001ffe67; 2413 magic2 = 0x001ffe67;
2433 } else { 2414 } else {
2434 magic2 = 0x00087e67; 2415 magic2 = 0x00087e67;
@@ -2446,14 +2427,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2446 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ 2427 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
2447 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ 2428 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2448 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ 2429 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
2449 if (IS_NVA3F(dev_priv->chipset)) 2430 if (IS_NVA3F(device->chipset))
2450 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2431 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2451 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ 2432 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
2452 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ 2433 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
2453 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ 2434 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
2454 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ 2435 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
2455 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ 2436 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
2456 if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset)) 2437 if (device->chipset >= 0xa0 && !IS_NVAAF(device->chipset))
2457 xf_emit(ctx, 1, 0x15); /* 000000ff */ 2438 xf_emit(ctx, 1, 0x15); /* 000000ff */
2458 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ 2439 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
2459 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ 2440 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
@@ -2462,14 +2443,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2462 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ 2443 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
2463 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ 2444 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2464 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2445 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2465 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) { 2446 if (device->chipset == 0x86 || device->chipset == 0x92 || device->chipset == 0x98 || device->chipset >= 0xa0) {
2466 xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */ 2447 xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */
2467 xf_emit(ctx, 1, 4); /* 7 */ 2448 xf_emit(ctx, 1, 4); /* 7 */
2468 xf_emit(ctx, 1, 0x400); /* fffffff */ 2449 xf_emit(ctx, 1, 0x400); /* fffffff */
2469 xf_emit(ctx, 1, 0x300); /* ffff */ 2450 xf_emit(ctx, 1, 0x300); /* ffff */
2470 xf_emit(ctx, 1, 0x1001); /* 1fff */ 2451 xf_emit(ctx, 1, 0x1001); /* 1fff */
2471 if (dev_priv->chipset != 0xa0) { 2452 if (device->chipset != 0xa0) {
2472 if (IS_NVA3F(dev_priv->chipset)) 2453 if (IS_NVA3F(device->chipset))
2473 xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */ 2454 xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */
2474 else 2455 else
2475 xf_emit(ctx, 1, 0x15); /* ff */ 2456 xf_emit(ctx, 1, 0x15); /* ff */
@@ -2547,7 +2528,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2547 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ 2528 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2548 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ 2529 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
2549 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ 2530 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
2550 if (dev_priv->chipset >= 0xa0) { 2531 if (device->chipset >= 0xa0) {
2551 xf_emit(ctx, 2, 0); 2532 xf_emit(ctx, 2, 0);
2552 xf_emit(ctx, 1, 0x1001); 2533 xf_emit(ctx, 1, 0x1001);
2553 xf_emit(ctx, 0xb, 0); 2534 xf_emit(ctx, 0xb, 0);
@@ -2564,7 +2545,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2564 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ 2545 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
2565 xf_emit(ctx, 1, 0x11); /* 3f/7f */ 2546 xf_emit(ctx, 1, 0x11); /* 3f/7f */
2566 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ 2547 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
2567 if (dev_priv->chipset != 0x50) { 2548 if (device->chipset != 0x50) {
2568 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ 2549 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
2569 xf_emit(ctx, 1, 0); /* 000000ff */ 2550 xf_emit(ctx, 1, 0); /* 000000ff */
2570 } 2551 }
@@ -2581,7 +2562,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2581 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ 2562 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
2582 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2563 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2583 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ 2564 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
2584 if (IS_NVA3F(dev_priv->chipset)) { 2565 if (IS_NVA3F(device->chipset)) {
2585 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */ 2566 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */
2586 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ 2567 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
2587 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ 2568 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2600,7 +2581,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2600 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2581 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2601 xf_emit(ctx, 1, 0); /* 00000001 */ 2582 xf_emit(ctx, 1, 0); /* 00000001 */
2602 xf_emit(ctx, 1, 0); /* 000003ff */ 2583 xf_emit(ctx, 1, 0); /* 000003ff */
2603 } else if (dev_priv->chipset >= 0xa0) { 2584 } else if (device->chipset >= 0xa0) {
2604 xf_emit(ctx, 2, 0); /* 00000001 */ 2585 xf_emit(ctx, 2, 0); /* 00000001 */
2605 xf_emit(ctx, 1, 0); /* 00000007 */ 2586 xf_emit(ctx, 1, 0); /* 00000007 */
2606 xf_emit(ctx, 1, 0); /* 00000003 */ 2587 xf_emit(ctx, 1, 0); /* 00000003 */
@@ -2614,7 +2595,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2614 xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */ 2595 xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */
2615 xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */ 2596 xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */
2616 xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */ 2597 xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */
2617 if (dev_priv->chipset >= 0xa0) 2598 if (device->chipset >= 0xa0)
2618 xf_emit(ctx, 2, 0); /* 00000001 */ 2599 xf_emit(ctx, 2, 0); /* 00000001 */
2619 xf_emit(ctx, 1, 0); /* 000003ff */ 2600 xf_emit(ctx, 1, 0); /* 000003ff */
2620 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ 2601 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
@@ -2628,9 +2609,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2628 xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ 2609 xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
2629 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ 2610 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
2630 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ 2611 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
2631 if (dev_priv->chipset >= 0xa0) 2612 if (device->chipset >= 0xa0)
2632 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */ 2613 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */
2633 if (IS_NVA3F(dev_priv->chipset)) { 2614 if (IS_NVA3F(device->chipset)) {
2634 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ 2615 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
2635 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ 2616 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
2636 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ 2617 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
@@ -2659,9 +2640,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
2659static void 2640static void
2660nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx) 2641nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
2661{ 2642{
2662 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2643 struct nouveau_device *device = ctx->device;
2663 int magic3; 2644 int magic3;
2664 switch (dev_priv->chipset) { 2645 switch (device->chipset) {
2665 case 0x50: 2646 case 0x50:
2666 magic3 = 0x1000; 2647 magic3 = 0x1000;
2667 break; 2648 break;
@@ -2681,16 +2662,16 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
2681 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 2662 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2682 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 2663 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2683 xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */ 2664 xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
2684 if (IS_NVA3F(dev_priv->chipset)) 2665 if (IS_NVA3F(device->chipset))
2685 xf_emit(ctx, 0x1f, 0); /* ffffffff */ 2666 xf_emit(ctx, 0x1f, 0); /* ffffffff */
2686 else if (dev_priv->chipset >= 0xa0) 2667 else if (device->chipset >= 0xa0)
2687 xf_emit(ctx, 0x0f, 0); /* ffffffff */ 2668 xf_emit(ctx, 0x0f, 0); /* ffffffff */
2688 else 2669 else
2689 xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */ 2670 xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
2690 xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */ 2671 xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
2691 xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ 2672 xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
2692 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ 2673 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
2693 if (dev_priv->chipset >= 0xa0) 2674 if (device->chipset >= 0xa0)
2694 xf_emit(ctx, 1, 0x03020100); /* ffffffff */ 2675 xf_emit(ctx, 1, 0x03020100); /* ffffffff */
2695 else 2676 else
2696 xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */ 2677 xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
@@ -2733,11 +2714,11 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
2733 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 2714 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
2734 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 2715 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
2735 xf_emit(ctx, 1, 0); /* 111/113 */ 2716 xf_emit(ctx, 1, 0); /* 111/113 */
2736 if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96) 2717 if (device->chipset == 0x94 || device->chipset == 0x96)
2737 xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ 2718 xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
2738 else if (dev_priv->chipset < 0xa0) 2719 else if (device->chipset < 0xa0)
2739 xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ 2720 xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
2740 else if (!IS_NVA3F(dev_priv->chipset)) 2721 else if (!IS_NVA3F(device->chipset))
2741 xf_emit(ctx, 0x210, 0); /* ffffffff */ 2722 xf_emit(ctx, 0x210, 0); /* ffffffff */
2742 else 2723 else
2743 xf_emit(ctx, 0x410, 0); /* ffffffff */ 2724 xf_emit(ctx, 0x410, 0); /* ffffffff */
@@ -2751,12 +2732,12 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
2751static void 2732static void
2752nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx) 2733nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2753{ 2734{
2754 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 2735 struct nouveau_device *device = ctx->device;
2755 int magic1, magic2; 2736 int magic1, magic2;
2756 if (dev_priv->chipset == 0x50) { 2737 if (device->chipset == 0x50) {
2757 magic1 = 0x3ff; 2738 magic1 = 0x3ff;
2758 magic2 = 0x00003e60; 2739 magic2 = 0x00003e60;
2759 } else if (!IS_NVA3F(dev_priv->chipset)) { 2740 } else if (!IS_NVA3F(device->chipset)) {
2760 magic1 = 0x7ff; 2741 magic1 = 0x7ff;
2761 magic2 = 0x001ffe67; 2742 magic2 = 0x001ffe67;
2762 } else { 2743 } else {
@@ -2766,7 +2747,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2766 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ 2747 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
2767 xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */ 2748 xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
2768 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ 2749 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
2769 if (IS_NVA3F(dev_priv->chipset)) 2750 if (IS_NVA3F(device->chipset))
2770 xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */ 2751 xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
2771 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ 2752 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2772 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ 2753 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
@@ -2800,11 +2781,11 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2800 xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */ 2781 xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
2801 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ 2782 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
2802 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ 2783 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
2803 if (IS_NVA3F(dev_priv->chipset)) { 2784 if (IS_NVA3F(device->chipset)) {
2804 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ 2785 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
2805 xf_emit(ctx, 1, 0); /* 00000003 */ 2786 xf_emit(ctx, 1, 0); /* 00000003 */
2806 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */ 2787 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
2807 } else if (dev_priv->chipset >= 0xa0) { 2788 } else if (device->chipset >= 0xa0) {
2808 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */ 2789 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
2809 xf_emit(ctx, 1, 0); /* 00000003 */ 2790 xf_emit(ctx, 1, 0); /* 00000003 */
2810 } else { 2791 } else {
@@ -2818,7 +2799,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2818 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ 2799 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
2819 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ 2800 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
2820 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ 2801 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
2821 if (IS_NVA3F(dev_priv->chipset)) { 2802 if (IS_NVA3F(device->chipset)) {
2822 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ 2803 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
2823 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ 2804 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
2824 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ 2805 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2846,7 +2827,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2846 xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */ 2827 xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
2847 xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */ 2828 xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
2848 xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */ 2829 xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
2849 if (IS_NVA3F(dev_priv->chipset)) 2830 if (IS_NVA3F(device->chipset))
2850 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2831 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2851 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2832 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2852 xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */ 2833 xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
@@ -2870,9 +2851,9 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2870 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ 2851 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
2871 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ 2852 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
2872 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ 2853 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
2873 if (IS_NVA3F(dev_priv->chipset)) 2854 if (IS_NVA3F(device->chipset))
2874 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2855 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2875 if (dev_priv->chipset == 0x50) 2856 if (device->chipset == 0x50)
2876 xf_emit(ctx, 1, 0); /* ff */ 2857 xf_emit(ctx, 1, 0); /* ff */
2877 else 2858 else
2878 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */ 2859 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
@@ -2907,7 +2888,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2907 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ 2888 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
2908 xf_emit(ctx, 1, 0); /* 00000007 */ 2889 xf_emit(ctx, 1, 0); /* 00000007 */
2909 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2890 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2910 if (IS_NVA3F(dev_priv->chipset)) 2891 if (IS_NVA3F(device->chipset))
2911 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2892 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2912 xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */ 2893 xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
2913 xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */ 2894 xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
@@ -2945,7 +2926,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2945 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ 2926 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
2946 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ 2927 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
2947 xf_emit(ctx, 1, 0); /* 00000007 */ 2928 xf_emit(ctx, 1, 0); /* 00000007 */
2948 if (IS_NVA3F(dev_priv->chipset)) 2929 if (IS_NVA3F(device->chipset))
2949 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2930 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2950 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ 2931 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
2951 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ 2932 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
@@ -2974,7 +2955,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2974 xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */ 2955 xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
2975 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 2956 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
2976 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ 2957 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2977 if (IS_NVA3F(dev_priv->chipset)) 2958 if (IS_NVA3F(device->chipset))
2978 xf_emit(ctx, 1, 0); /* 00000001 */ 2959 xf_emit(ctx, 1, 0); /* 00000001 */
2979 xf_emit(ctx, 1, 0); /* ffff0ff3 */ 2960 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2980 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ 2961 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
@@ -2988,14 +2969,14 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
2988 xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ 2969 xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
2989 xf_emit(ctx, 1, 0); /* 7 */ 2970 xf_emit(ctx, 1, 0); /* 7 */
2990 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ 2971 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
2991 if (IS_NVA3F(dev_priv->chipset)) { 2972 if (IS_NVA3F(device->chipset)) {
2992 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ 2973 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
2993 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2974 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
2994 } 2975 }
2995 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ 2976 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
2996 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ 2977 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
2997 xf_emit(ctx, 1, 0); /* ffff0ff3 */ 2978 xf_emit(ctx, 1, 0); /* ffff0ff3 */
2998 if (dev_priv->chipset >= 0xa0) 2979 if (device->chipset >= 0xa0)
2999 xf_emit(ctx, 1, 0x0fac6881); /* fffffff */ 2980 xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
3000 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ 2981 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
3001 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ 2982 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
@@ -3012,12 +2993,12 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
3012 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ 2993 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
3013 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ 2994 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
3014 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ 2995 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
3015 if (IS_NVA3F(dev_priv->chipset)) { 2996 if (IS_NVA3F(device->chipset)) {
3016 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 2997 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
3017 xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */ 2998 xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
3018 } 2999 }
3019 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ 3000 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
3020 if (dev_priv->chipset >= 0xa0) { 3001 if (device->chipset >= 0xa0) {
3021 xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */ 3002 xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
3022 xf_emit(ctx, 1, 0xfac6881); /* fffffff */ 3003 xf_emit(ctx, 1, 0xfac6881); /* fffffff */
3023 xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */ 3004 xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
@@ -3027,7 +3008,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
3027 xf_emit(ctx, 2, 0); /* 7, f */ 3008 xf_emit(ctx, 2, 0); /* 7, f */
3028 xf_emit(ctx, 1, 1); /* 1 */ 3009 xf_emit(ctx, 1, 1); /* 1 */
3029 xf_emit(ctx, 1, 0); /* 7/f */ 3010 xf_emit(ctx, 1, 0); /* 7/f */
3030 if (IS_NVA3F(dev_priv->chipset)) 3011 if (IS_NVA3F(device->chipset))
3031 xf_emit(ctx, 0x9, 0); /* 1 */ 3012 xf_emit(ctx, 0x9, 0); /* 1 */
3032 else 3013 else
3033 xf_emit(ctx, 0x8, 0); /* 1 */ 3014 xf_emit(ctx, 0x8, 0); /* 1 */
@@ -3041,7 +3022,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
3041 xf_emit(ctx, 1, 0x11); /* 7f */ 3022 xf_emit(ctx, 1, 0x11); /* 7f */
3042 xf_emit(ctx, 1, 1); /* 1 */ 3023 xf_emit(ctx, 1, 1); /* 1 */
3043 xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */ 3024 xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
3044 if (IS_NVA3F(dev_priv->chipset)) { 3025 if (IS_NVA3F(device->chipset)) {
3045 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ 3026 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
3046 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 3027 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
3047 } 3028 }
@@ -3051,15 +3032,15 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
3051static void 3032static void
3052nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx) 3033nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
3053{ 3034{
3054 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3035 struct nouveau_device *device = ctx->device;
3055 xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */ 3036 xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
3056 if (dev_priv->chipset != 0x50) 3037 if (device->chipset != 0x50)
3057 xf_emit(ctx, 1, 0); /* 3 */ 3038 xf_emit(ctx, 1, 0); /* 3 */
3058 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */ 3039 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
3059 xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */ 3040 xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
3060 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */ 3041 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
3061 xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */ 3042 xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
3062 if (dev_priv->chipset == 0x50) 3043 if (device->chipset == 0x50)
3063 xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */ 3044 xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
3064 else 3045 else
3065 xf_emit(ctx, 2, 0); /* 3ff, 1 */ 3046 xf_emit(ctx, 2, 0); /* 3ff, 1 */
@@ -3071,13 +3052,13 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
3071 xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */ 3052 xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
3072 xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */ 3053 xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
3073 xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */ 3054 xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
3074 if (dev_priv->chipset == 0x50) { 3055 if (device->chipset == 0x50) {
3075 xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */ 3056 xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
3076 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */ 3057 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
3077 xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */ 3058 xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
3078 xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */ 3059 xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
3079 xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */ 3060 xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
3080 } else if (!IS_NVAAF(dev_priv->chipset)) { 3061 } else if (!IS_NVAAF(device->chipset)) {
3081 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */ 3062 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
3082 xf_emit(ctx, 1, 0); /* 00000003 */ 3063 xf_emit(ctx, 1, 0); /* 00000003 */
3083 xf_emit(ctx, 1, 0); /* 000003ff */ 3064 xf_emit(ctx, 1, 0); /* 000003ff */
@@ -3097,7 +3078,7 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
3097static void 3078static void
3098nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx) 3079nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
3099{ 3080{
3100 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3081 struct nouveau_device *device = ctx->device;
3101 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ 3082 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
3102 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ 3083 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
3103 xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */ 3084 xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
@@ -3109,7 +3090,7 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
3109 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ 3090 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
3110 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ 3091 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
3111 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */ 3092 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
3112 if (IS_NVA3F(dev_priv->chipset)) 3093 if (IS_NVA3F(device->chipset))
3113 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 3094 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
3114 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ 3095 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
3115 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ 3096 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
@@ -3136,8 +3117,8 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
3136static void 3117static void
3137nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx) 3118nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
3138{ 3119{
3139 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3120 struct nouveau_device *device = ctx->device;
3140 if (dev_priv->chipset < 0xa0) { 3121 if (device->chipset < 0xa0) {
3141 nv50_graph_construct_xfer_unk84xx(ctx); 3122 nv50_graph_construct_xfer_unk84xx(ctx);
3142 nv50_graph_construct_xfer_tprop(ctx); 3123 nv50_graph_construct_xfer_tprop(ctx);
3143 nv50_graph_construct_xfer_tex(ctx); 3124 nv50_graph_construct_xfer_tex(ctx);
@@ -3153,9 +3134,9 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
3153static void 3134static void
3154nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx) 3135nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3155{ 3136{
3156 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3137 struct nouveau_device *device = ctx->device;
3157 int i, mpcnt = 2; 3138 int i, mpcnt = 2;
3158 switch (dev_priv->chipset) { 3139 switch (device->chipset) {
3159 case 0x98: 3140 case 0x98:
3160 case 0xaa: 3141 case 0xaa:
3161 mpcnt = 1; 3142 mpcnt = 1;
@@ -3182,34 +3163,34 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3182 xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */ 3163 xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
3183 xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */ 3164 xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
3184 xf_emit(ctx, 1, 0x04000400); /* ffffffff */ 3165 xf_emit(ctx, 1, 0x04000400); /* ffffffff */
3185 if (dev_priv->chipset >= 0xa0) 3166 if (device->chipset >= 0xa0)
3186 xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */ 3167 xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
3187 xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */ 3168 xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
3188 xf_emit(ctx, 1, 0); /* ff/3ff */ 3169 xf_emit(ctx, 1, 0); /* ff/3ff */
3189 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 3170 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
3190 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) { 3171 if (device->chipset == 0x86 || device->chipset == 0x98 || device->chipset == 0xa8 || IS_NVAAF(device->chipset)) {
3191 xf_emit(ctx, 1, 0xe00); /* 7fff */ 3172 xf_emit(ctx, 1, 0xe00); /* 7fff */
3192 xf_emit(ctx, 1, 0x1e00); /* 7fff */ 3173 xf_emit(ctx, 1, 0x1e00); /* 7fff */
3193 } 3174 }
3194 xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */ 3175 xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
3195 xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ 3176 xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
3196 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 3177 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
3197 if (dev_priv->chipset == 0x50) 3178 if (device->chipset == 0x50)
3198 xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */ 3179 xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
3199 xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */ 3180 xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
3200 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 3181 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
3201 xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ 3182 xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
3202 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ 3183 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
3203 if (IS_NVAAF(dev_priv->chipset)) 3184 if (IS_NVAAF(device->chipset))
3204 xf_emit(ctx, 0xb, 0); /* RO */ 3185 xf_emit(ctx, 0xb, 0); /* RO */
3205 else if (dev_priv->chipset >= 0xa0) 3186 else if (device->chipset >= 0xa0)
3206 xf_emit(ctx, 0xc, 0); /* RO */ 3187 xf_emit(ctx, 0xc, 0); /* RO */
3207 else 3188 else
3208 xf_emit(ctx, 0xa, 0); /* RO */ 3189 xf_emit(ctx, 0xa, 0); /* RO */
3209 } 3190 }
3210 xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ 3191 xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
3211 xf_emit(ctx, 1, 0); /* ff/3ff */ 3192 xf_emit(ctx, 1, 0); /* ff/3ff */
3212 if (dev_priv->chipset >= 0xa0) { 3193 if (device->chipset >= 0xa0) {
3213 xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */ 3194 xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
3214 } 3195 }
3215 xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */ 3196 xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
@@ -3223,7 +3204,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3223 xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ 3204 xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
3224 xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNk0FAC */ 3205 xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNk0FAC */
3225 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ 3206 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
3226 if (IS_NVA3F(dev_priv->chipset)) 3207 if (IS_NVA3F(device->chipset))
3227 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ 3208 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
3228 xf_emit(ctx, 1, 0); /* ff/3ff */ 3209 xf_emit(ctx, 1, 0); /* ff/3ff */
3229 xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */ 3210 xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
@@ -3238,7 +3219,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3238 xf_emit(ctx, 1, 0); /* 00000007 */ 3219 xf_emit(ctx, 1, 0); /* 00000007 */
3239 xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */ 3220 xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
3240 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ 3221 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
3241 if (IS_NVA3F(dev_priv->chipset)) 3222 if (IS_NVA3F(device->chipset))
3242 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ 3223 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
3243 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ 3224 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
3244 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ 3225 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
@@ -3253,7 +3234,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3253 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ 3234 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
3254 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ 3235 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
3255 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ 3236 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
3256 if (IS_NVA3F(dev_priv->chipset)) { 3237 if (IS_NVA3F(device->chipset)) {
3257 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ 3238 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
3258 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ 3239 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
3259 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ 3240 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
@@ -3268,11 +3249,11 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3268 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ 3249 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
3269 xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ 3250 xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
3270 /* XXX: demagic this part some day */ 3251 /* XXX: demagic this part some day */
3271 if (dev_priv->chipset == 0x50) 3252 if (device->chipset == 0x50)
3272 xf_emit(ctx, 0x3a0, 0); 3253 xf_emit(ctx, 0x3a0, 0);
3273 else if (dev_priv->chipset < 0x94) 3254 else if (device->chipset < 0x94)
3274 xf_emit(ctx, 0x3a2, 0); 3255 xf_emit(ctx, 0x3a2, 0);
3275 else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa) 3256 else if (device->chipset == 0x98 || device->chipset == 0xaa)
3276 xf_emit(ctx, 0x39f, 0); 3257 xf_emit(ctx, 0x39f, 0);
3277 else 3258 else
3278 xf_emit(ctx, 0x3a3, 0); 3259 xf_emit(ctx, 0x3a3, 0);
@@ -3285,15 +3266,15 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
3285static void 3266static void
3286nv50_graph_construct_xfer2(struct nouveau_grctx *ctx) 3267nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
3287{ 3268{
3288 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 3269 struct nouveau_device *device = ctx->device;
3289 int i; 3270 int i;
3290 uint32_t offset; 3271 u32 offset;
3291 uint32_t units = nv_rd32 (ctx->dev, 0x1540); 3272 u32 units = nv_rd32 (ctx->device, 0x1540);
3292 int size = 0; 3273 int size = 0;
3293 3274
3294 offset = (ctx->ctxvals_pos+0x3f)&~0x3f; 3275 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
3295 3276
3296 if (dev_priv->chipset < 0xa0) { 3277 if (device->chipset < 0xa0) {
3297 for (i = 0; i < 8; i++) { 3278 for (i = 0; i < 8; i++) {
3298 ctx->ctxvals_pos = offset + i; 3279 ctx->ctxvals_pos = offset + i;
3299 /* that little bugger belongs to csched. No idea 3280 /* that little bugger belongs to csched. No idea
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
new file mode 100644
index 000000000000..0b7951a85943
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -0,0 +1,3039 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27void
28nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data)
29{
30 nv_wr32(priv, 0x400204, data);
31 nv_wr32(priv, 0x400200, icmd);
32 while (nv_rd32(priv, 0x400700) & 2) {}
33}
34
35int
36nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
37{
38 struct nouveau_bar *bar = nouveau_bar(priv);
39 struct nouveau_object *parent = nv_object(priv);
40 struct nouveau_gpuobj *chan;
41 u32 size = (0x80000 + priv->size + 4095) & ~4095;
42 int ret, i;
43
44 /* allocate memory to for a "channel", which we'll use to generate
45 * the default context values
46 */
47 ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000,
48 NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
49 chan = info->chan;
50 if (ret) {
51 nv_error(priv, "failed to allocate channel memory, %d\n", ret);
52 return ret;
53 }
54
55 /* PGD pointer */
56 nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
57 nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
58 nv_wo32(chan, 0x0208, 0xffffffff);
59 nv_wo32(chan, 0x020c, 0x000000ff);
60
61 /* PGT[0] pointer */
62 nv_wo32(chan, 0x1000, 0x00000000);
63 nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
64
65 /* identity-map the whole "channel" into its own vm */
66 for (i = 0; i < size / 4096; i++) {
67 u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
68 nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
69 nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
70 }
71
72 /* context pointer (virt) */
73 nv_wo32(chan, 0x0210, 0x00080004);
74 nv_wo32(chan, 0x0214, 0x00000000);
75
76 bar->flush(bar);
77
78 nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
79 nv_wr32(priv, 0x100cbc, 0x80000001);
80 nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
81
82 /* setup default state for mmio list construction */
83 info->data = priv->mmio_data;
84 info->mmio = priv->mmio_list;
85 info->addr = 0x2000 + (i * 8);
86 info->priv = priv;
87 info->buffer_nr = 0;
88
89 if (priv->firmware) {
90 nv_wr32(priv, 0x409840, 0x00000030);
91 nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
92 nv_wr32(priv, 0x409504, 0x00000003);
93 if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
94 nv_error(priv, "load_ctx timeout\n");
95
96 nv_wo32(chan, 0x8001c, 1);
97 nv_wo32(chan, 0x80020, 0);
98 nv_wo32(chan, 0x80028, 0);
99 nv_wo32(chan, 0x8002c, 0);
100 bar->flush(bar);
101 return 0;
102 }
103
104 /* HUB_FUC(SET_CHAN) */
105 nv_wr32(priv, 0x409840, 0x80000000);
106 nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
107 nv_wr32(priv, 0x409504, 0x00000001);
108 if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
109 nv_error(priv, "HUB_SET_CHAN timeout\n");
110 nvc0_graph_ctxctl_debug(priv);
111 nouveau_gpuobj_ref(NULL, &info->chan);
112 return -EBUSY;
113 }
114
115 return 0;
116}
117
118void
119nvc0_grctx_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
120{
121 info->buffer[info->buffer_nr] = info->addr;
122 info->buffer[info->buffer_nr] += (align - 1);
123 info->buffer[info->buffer_nr] &= ~(align - 1);
124 info->addr = info->buffer[info->buffer_nr++] + size;
125
126 info->data->size = size;
127 info->data->align = align;
128 info->data->access = access;
129 info->data++;
130}
131
132void
133nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
134{
135 struct nvc0_graph_priv *priv = info->priv;
136
137 info->mmio->addr = addr;
138 info->mmio->data = data;
139 info->mmio->shift = shift;
140 info->mmio->buffer = buf;
141 info->mmio++;
142
143 if (shift)
144 data |= info->buffer[buf] >> shift;
145 nv_wr32(priv, addr, data);
146}
147
148int
149nvc0_grctx_fini(struct nvc0_grctx *info)
150{
151 struct nvc0_graph_priv *priv = info->priv;
152 int i;
153
154 /* trigger a context unload by unsetting the "next channel valid" bit
155 * and faking a context switch interrupt
156 */
157 nv_mask(priv, 0x409b04, 0x80000000, 0x00000000);
158 nv_wr32(priv, 0x409000, 0x00000100);
159 if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) {
160 nv_error(priv, "grctx template channel unload timeout\n");
161 return -EBUSY;
162 }
163
164 priv->data = kmalloc(priv->size, GFP_KERNEL);
165 if (priv->data) {
166 for (i = 0; i < priv->size; i += 4)
167 priv->data[i / 4] = nv_ro32(info->chan, 0x80000 + i);
168 }
169
170 nouveau_gpuobj_ref(NULL, &info->chan);
171 return priv->data ? 0 : -ENOMEM;
172}
173
174static void
175nvc0_grctx_generate_9097(struct nvc0_graph_priv *priv)
176{
177 u32 fermi = nvc0_graph_class(priv);
178 u32 mthd;
179
180 nv_mthd(priv, 0x9097, 0x0800, 0x00000000);
181 nv_mthd(priv, 0x9097, 0x0840, 0x00000000);
182 nv_mthd(priv, 0x9097, 0x0880, 0x00000000);
183 nv_mthd(priv, 0x9097, 0x08c0, 0x00000000);
184 nv_mthd(priv, 0x9097, 0x0900, 0x00000000);
185 nv_mthd(priv, 0x9097, 0x0940, 0x00000000);
186 nv_mthd(priv, 0x9097, 0x0980, 0x00000000);
187 nv_mthd(priv, 0x9097, 0x09c0, 0x00000000);
188 nv_mthd(priv, 0x9097, 0x0804, 0x00000000);
189 nv_mthd(priv, 0x9097, 0x0844, 0x00000000);
190 nv_mthd(priv, 0x9097, 0x0884, 0x00000000);
191 nv_mthd(priv, 0x9097, 0x08c4, 0x00000000);
192 nv_mthd(priv, 0x9097, 0x0904, 0x00000000);
193 nv_mthd(priv, 0x9097, 0x0944, 0x00000000);
194 nv_mthd(priv, 0x9097, 0x0984, 0x00000000);
195 nv_mthd(priv, 0x9097, 0x09c4, 0x00000000);
196 nv_mthd(priv, 0x9097, 0x0808, 0x00000400);
197 nv_mthd(priv, 0x9097, 0x0848, 0x00000400);
198 nv_mthd(priv, 0x9097, 0x0888, 0x00000400);
199 nv_mthd(priv, 0x9097, 0x08c8, 0x00000400);
200 nv_mthd(priv, 0x9097, 0x0908, 0x00000400);
201 nv_mthd(priv, 0x9097, 0x0948, 0x00000400);
202 nv_mthd(priv, 0x9097, 0x0988, 0x00000400);
203 nv_mthd(priv, 0x9097, 0x09c8, 0x00000400);
204 nv_mthd(priv, 0x9097, 0x080c, 0x00000300);
205 nv_mthd(priv, 0x9097, 0x084c, 0x00000300);
206 nv_mthd(priv, 0x9097, 0x088c, 0x00000300);
207 nv_mthd(priv, 0x9097, 0x08cc, 0x00000300);
208 nv_mthd(priv, 0x9097, 0x090c, 0x00000300);
209 nv_mthd(priv, 0x9097, 0x094c, 0x00000300);
210 nv_mthd(priv, 0x9097, 0x098c, 0x00000300);
211 nv_mthd(priv, 0x9097, 0x09cc, 0x00000300);
212 nv_mthd(priv, 0x9097, 0x0810, 0x000000cf);
213 nv_mthd(priv, 0x9097, 0x0850, 0x00000000);
214 nv_mthd(priv, 0x9097, 0x0890, 0x00000000);
215 nv_mthd(priv, 0x9097, 0x08d0, 0x00000000);
216 nv_mthd(priv, 0x9097, 0x0910, 0x00000000);
217 nv_mthd(priv, 0x9097, 0x0950, 0x00000000);
218 nv_mthd(priv, 0x9097, 0x0990, 0x00000000);
219 nv_mthd(priv, 0x9097, 0x09d0, 0x00000000);
220 nv_mthd(priv, 0x9097, 0x0814, 0x00000040);
221 nv_mthd(priv, 0x9097, 0x0854, 0x00000040);
222 nv_mthd(priv, 0x9097, 0x0894, 0x00000040);
223 nv_mthd(priv, 0x9097, 0x08d4, 0x00000040);
224 nv_mthd(priv, 0x9097, 0x0914, 0x00000040);
225 nv_mthd(priv, 0x9097, 0x0954, 0x00000040);
226 nv_mthd(priv, 0x9097, 0x0994, 0x00000040);
227 nv_mthd(priv, 0x9097, 0x09d4, 0x00000040);
228 nv_mthd(priv, 0x9097, 0x0818, 0x00000001);
229 nv_mthd(priv, 0x9097, 0x0858, 0x00000001);
230 nv_mthd(priv, 0x9097, 0x0898, 0x00000001);
231 nv_mthd(priv, 0x9097, 0x08d8, 0x00000001);
232 nv_mthd(priv, 0x9097, 0x0918, 0x00000001);
233 nv_mthd(priv, 0x9097, 0x0958, 0x00000001);
234 nv_mthd(priv, 0x9097, 0x0998, 0x00000001);
235 nv_mthd(priv, 0x9097, 0x09d8, 0x00000001);
236 nv_mthd(priv, 0x9097, 0x081c, 0x00000000);
237 nv_mthd(priv, 0x9097, 0x085c, 0x00000000);
238 nv_mthd(priv, 0x9097, 0x089c, 0x00000000);
239 nv_mthd(priv, 0x9097, 0x08dc, 0x00000000);
240 nv_mthd(priv, 0x9097, 0x091c, 0x00000000);
241 nv_mthd(priv, 0x9097, 0x095c, 0x00000000);
242 nv_mthd(priv, 0x9097, 0x099c, 0x00000000);
243 nv_mthd(priv, 0x9097, 0x09dc, 0x00000000);
244 nv_mthd(priv, 0x9097, 0x0820, 0x00000000);
245 nv_mthd(priv, 0x9097, 0x0860, 0x00000000);
246 nv_mthd(priv, 0x9097, 0x08a0, 0x00000000);
247 nv_mthd(priv, 0x9097, 0x08e0, 0x00000000);
248 nv_mthd(priv, 0x9097, 0x0920, 0x00000000);
249 nv_mthd(priv, 0x9097, 0x0960, 0x00000000);
250 nv_mthd(priv, 0x9097, 0x09a0, 0x00000000);
251 nv_mthd(priv, 0x9097, 0x09e0, 0x00000000);
252 nv_mthd(priv, 0x9097, 0x2700, 0x00000000);
253 nv_mthd(priv, 0x9097, 0x2720, 0x00000000);
254 nv_mthd(priv, 0x9097, 0x2740, 0x00000000);
255 nv_mthd(priv, 0x9097, 0x2760, 0x00000000);
256 nv_mthd(priv, 0x9097, 0x2780, 0x00000000);
257 nv_mthd(priv, 0x9097, 0x27a0, 0x00000000);
258 nv_mthd(priv, 0x9097, 0x27c0, 0x00000000);
259 nv_mthd(priv, 0x9097, 0x27e0, 0x00000000);
260 nv_mthd(priv, 0x9097, 0x2704, 0x00000000);
261 nv_mthd(priv, 0x9097, 0x2724, 0x00000000);
262 nv_mthd(priv, 0x9097, 0x2744, 0x00000000);
263 nv_mthd(priv, 0x9097, 0x2764, 0x00000000);
264 nv_mthd(priv, 0x9097, 0x2784, 0x00000000);
265 nv_mthd(priv, 0x9097, 0x27a4, 0x00000000);
266 nv_mthd(priv, 0x9097, 0x27c4, 0x00000000);
267 nv_mthd(priv, 0x9097, 0x27e4, 0x00000000);
268 nv_mthd(priv, 0x9097, 0x2708, 0x00000000);
269 nv_mthd(priv, 0x9097, 0x2728, 0x00000000);
270 nv_mthd(priv, 0x9097, 0x2748, 0x00000000);
271 nv_mthd(priv, 0x9097, 0x2768, 0x00000000);
272 nv_mthd(priv, 0x9097, 0x2788, 0x00000000);
273 nv_mthd(priv, 0x9097, 0x27a8, 0x00000000);
274 nv_mthd(priv, 0x9097, 0x27c8, 0x00000000);
275 nv_mthd(priv, 0x9097, 0x27e8, 0x00000000);
276 nv_mthd(priv, 0x9097, 0x270c, 0x00000000);
277 nv_mthd(priv, 0x9097, 0x272c, 0x00000000);
278 nv_mthd(priv, 0x9097, 0x274c, 0x00000000);
279 nv_mthd(priv, 0x9097, 0x276c, 0x00000000);
280 nv_mthd(priv, 0x9097, 0x278c, 0x00000000);
281 nv_mthd(priv, 0x9097, 0x27ac, 0x00000000);
282 nv_mthd(priv, 0x9097, 0x27cc, 0x00000000);
283 nv_mthd(priv, 0x9097, 0x27ec, 0x00000000);
284 nv_mthd(priv, 0x9097, 0x2710, 0x00014000);
285 nv_mthd(priv, 0x9097, 0x2730, 0x00014000);
286 nv_mthd(priv, 0x9097, 0x2750, 0x00014000);
287 nv_mthd(priv, 0x9097, 0x2770, 0x00014000);
288 nv_mthd(priv, 0x9097, 0x2790, 0x00014000);
289 nv_mthd(priv, 0x9097, 0x27b0, 0x00014000);
290 nv_mthd(priv, 0x9097, 0x27d0, 0x00014000);
291 nv_mthd(priv, 0x9097, 0x27f0, 0x00014000);
292 nv_mthd(priv, 0x9097, 0x2714, 0x00000040);
293 nv_mthd(priv, 0x9097, 0x2734, 0x00000040);
294 nv_mthd(priv, 0x9097, 0x2754, 0x00000040);
295 nv_mthd(priv, 0x9097, 0x2774, 0x00000040);
296 nv_mthd(priv, 0x9097, 0x2794, 0x00000040);
297 nv_mthd(priv, 0x9097, 0x27b4, 0x00000040);
298 nv_mthd(priv, 0x9097, 0x27d4, 0x00000040);
299 nv_mthd(priv, 0x9097, 0x27f4, 0x00000040);
300 nv_mthd(priv, 0x9097, 0x1c00, 0x00000000);
301 nv_mthd(priv, 0x9097, 0x1c10, 0x00000000);
302 nv_mthd(priv, 0x9097, 0x1c20, 0x00000000);
303 nv_mthd(priv, 0x9097, 0x1c30, 0x00000000);
304 nv_mthd(priv, 0x9097, 0x1c40, 0x00000000);
305 nv_mthd(priv, 0x9097, 0x1c50, 0x00000000);
306 nv_mthd(priv, 0x9097, 0x1c60, 0x00000000);
307 nv_mthd(priv, 0x9097, 0x1c70, 0x00000000);
308 nv_mthd(priv, 0x9097, 0x1c80, 0x00000000);
309 nv_mthd(priv, 0x9097, 0x1c90, 0x00000000);
310 nv_mthd(priv, 0x9097, 0x1ca0, 0x00000000);
311 nv_mthd(priv, 0x9097, 0x1cb0, 0x00000000);
312 nv_mthd(priv, 0x9097, 0x1cc0, 0x00000000);
313 nv_mthd(priv, 0x9097, 0x1cd0, 0x00000000);
314 nv_mthd(priv, 0x9097, 0x1ce0, 0x00000000);
315 nv_mthd(priv, 0x9097, 0x1cf0, 0x00000000);
316 nv_mthd(priv, 0x9097, 0x1c04, 0x00000000);
317 nv_mthd(priv, 0x9097, 0x1c14, 0x00000000);
318 nv_mthd(priv, 0x9097, 0x1c24, 0x00000000);
319 nv_mthd(priv, 0x9097, 0x1c34, 0x00000000);
320 nv_mthd(priv, 0x9097, 0x1c44, 0x00000000);
321 nv_mthd(priv, 0x9097, 0x1c54, 0x00000000);
322 nv_mthd(priv, 0x9097, 0x1c64, 0x00000000);
323 nv_mthd(priv, 0x9097, 0x1c74, 0x00000000);
324 nv_mthd(priv, 0x9097, 0x1c84, 0x00000000);
325 nv_mthd(priv, 0x9097, 0x1c94, 0x00000000);
326 nv_mthd(priv, 0x9097, 0x1ca4, 0x00000000);
327 nv_mthd(priv, 0x9097, 0x1cb4, 0x00000000);
328 nv_mthd(priv, 0x9097, 0x1cc4, 0x00000000);
329 nv_mthd(priv, 0x9097, 0x1cd4, 0x00000000);
330 nv_mthd(priv, 0x9097, 0x1ce4, 0x00000000);
331 nv_mthd(priv, 0x9097, 0x1cf4, 0x00000000);
332 nv_mthd(priv, 0x9097, 0x1c08, 0x00000000);
333 nv_mthd(priv, 0x9097, 0x1c18, 0x00000000);
334 nv_mthd(priv, 0x9097, 0x1c28, 0x00000000);
335 nv_mthd(priv, 0x9097, 0x1c38, 0x00000000);
336 nv_mthd(priv, 0x9097, 0x1c48, 0x00000000);
337 nv_mthd(priv, 0x9097, 0x1c58, 0x00000000);
338 nv_mthd(priv, 0x9097, 0x1c68, 0x00000000);
339 nv_mthd(priv, 0x9097, 0x1c78, 0x00000000);
340 nv_mthd(priv, 0x9097, 0x1c88, 0x00000000);
341 nv_mthd(priv, 0x9097, 0x1c98, 0x00000000);
342 nv_mthd(priv, 0x9097, 0x1ca8, 0x00000000);
343 nv_mthd(priv, 0x9097, 0x1cb8, 0x00000000);
344 nv_mthd(priv, 0x9097, 0x1cc8, 0x00000000);
345 nv_mthd(priv, 0x9097, 0x1cd8, 0x00000000);
346 nv_mthd(priv, 0x9097, 0x1ce8, 0x00000000);
347 nv_mthd(priv, 0x9097, 0x1cf8, 0x00000000);
348 nv_mthd(priv, 0x9097, 0x1c0c, 0x00000000);
349 nv_mthd(priv, 0x9097, 0x1c1c, 0x00000000);
350 nv_mthd(priv, 0x9097, 0x1c2c, 0x00000000);
351 nv_mthd(priv, 0x9097, 0x1c3c, 0x00000000);
352 nv_mthd(priv, 0x9097, 0x1c4c, 0x00000000);
353 nv_mthd(priv, 0x9097, 0x1c5c, 0x00000000);
354 nv_mthd(priv, 0x9097, 0x1c6c, 0x00000000);
355 nv_mthd(priv, 0x9097, 0x1c7c, 0x00000000);
356 nv_mthd(priv, 0x9097, 0x1c8c, 0x00000000);
357 nv_mthd(priv, 0x9097, 0x1c9c, 0x00000000);
358 nv_mthd(priv, 0x9097, 0x1cac, 0x00000000);
359 nv_mthd(priv, 0x9097, 0x1cbc, 0x00000000);
360 nv_mthd(priv, 0x9097, 0x1ccc, 0x00000000);
361 nv_mthd(priv, 0x9097, 0x1cdc, 0x00000000);
362 nv_mthd(priv, 0x9097, 0x1cec, 0x00000000);
363 nv_mthd(priv, 0x9097, 0x1cfc, 0x00000000);
364 nv_mthd(priv, 0x9097, 0x1d00, 0x00000000);
365 nv_mthd(priv, 0x9097, 0x1d10, 0x00000000);
366 nv_mthd(priv, 0x9097, 0x1d20, 0x00000000);
367 nv_mthd(priv, 0x9097, 0x1d30, 0x00000000);
368 nv_mthd(priv, 0x9097, 0x1d40, 0x00000000);
369 nv_mthd(priv, 0x9097, 0x1d50, 0x00000000);
370 nv_mthd(priv, 0x9097, 0x1d60, 0x00000000);
371 nv_mthd(priv, 0x9097, 0x1d70, 0x00000000);
372 nv_mthd(priv, 0x9097, 0x1d80, 0x00000000);
373 nv_mthd(priv, 0x9097, 0x1d90, 0x00000000);
374 nv_mthd(priv, 0x9097, 0x1da0, 0x00000000);
375 nv_mthd(priv, 0x9097, 0x1db0, 0x00000000);
376 nv_mthd(priv, 0x9097, 0x1dc0, 0x00000000);
377 nv_mthd(priv, 0x9097, 0x1dd0, 0x00000000);
378 nv_mthd(priv, 0x9097, 0x1de0, 0x00000000);
379 nv_mthd(priv, 0x9097, 0x1df0, 0x00000000);
380 nv_mthd(priv, 0x9097, 0x1d04, 0x00000000);
381 nv_mthd(priv, 0x9097, 0x1d14, 0x00000000);
382 nv_mthd(priv, 0x9097, 0x1d24, 0x00000000);
383 nv_mthd(priv, 0x9097, 0x1d34, 0x00000000);
384 nv_mthd(priv, 0x9097, 0x1d44, 0x00000000);
385 nv_mthd(priv, 0x9097, 0x1d54, 0x00000000);
386 nv_mthd(priv, 0x9097, 0x1d64, 0x00000000);
387 nv_mthd(priv, 0x9097, 0x1d74, 0x00000000);
388 nv_mthd(priv, 0x9097, 0x1d84, 0x00000000);
389 nv_mthd(priv, 0x9097, 0x1d94, 0x00000000);
390 nv_mthd(priv, 0x9097, 0x1da4, 0x00000000);
391 nv_mthd(priv, 0x9097, 0x1db4, 0x00000000);
392 nv_mthd(priv, 0x9097, 0x1dc4, 0x00000000);
393 nv_mthd(priv, 0x9097, 0x1dd4, 0x00000000);
394 nv_mthd(priv, 0x9097, 0x1de4, 0x00000000);
395 nv_mthd(priv, 0x9097, 0x1df4, 0x00000000);
396 nv_mthd(priv, 0x9097, 0x1d08, 0x00000000);
397 nv_mthd(priv, 0x9097, 0x1d18, 0x00000000);
398 nv_mthd(priv, 0x9097, 0x1d28, 0x00000000);
399 nv_mthd(priv, 0x9097, 0x1d38, 0x00000000);
400 nv_mthd(priv, 0x9097, 0x1d48, 0x00000000);
401 nv_mthd(priv, 0x9097, 0x1d58, 0x00000000);
402 nv_mthd(priv, 0x9097, 0x1d68, 0x00000000);
403 nv_mthd(priv, 0x9097, 0x1d78, 0x00000000);
404 nv_mthd(priv, 0x9097, 0x1d88, 0x00000000);
405 nv_mthd(priv, 0x9097, 0x1d98, 0x00000000);
406 nv_mthd(priv, 0x9097, 0x1da8, 0x00000000);
407 nv_mthd(priv, 0x9097, 0x1db8, 0x00000000);
408 nv_mthd(priv, 0x9097, 0x1dc8, 0x00000000);
409 nv_mthd(priv, 0x9097, 0x1dd8, 0x00000000);
410 nv_mthd(priv, 0x9097, 0x1de8, 0x00000000);
411 nv_mthd(priv, 0x9097, 0x1df8, 0x00000000);
412 nv_mthd(priv, 0x9097, 0x1d0c, 0x00000000);
413 nv_mthd(priv, 0x9097, 0x1d1c, 0x00000000);
414 nv_mthd(priv, 0x9097, 0x1d2c, 0x00000000);
415 nv_mthd(priv, 0x9097, 0x1d3c, 0x00000000);
416 nv_mthd(priv, 0x9097, 0x1d4c, 0x00000000);
417 nv_mthd(priv, 0x9097, 0x1d5c, 0x00000000);
418 nv_mthd(priv, 0x9097, 0x1d6c, 0x00000000);
419 nv_mthd(priv, 0x9097, 0x1d7c, 0x00000000);
420 nv_mthd(priv, 0x9097, 0x1d8c, 0x00000000);
421 nv_mthd(priv, 0x9097, 0x1d9c, 0x00000000);
422 nv_mthd(priv, 0x9097, 0x1dac, 0x00000000);
423 nv_mthd(priv, 0x9097, 0x1dbc, 0x00000000);
424 nv_mthd(priv, 0x9097, 0x1dcc, 0x00000000);
425 nv_mthd(priv, 0x9097, 0x1ddc, 0x00000000);
426 nv_mthd(priv, 0x9097, 0x1dec, 0x00000000);
427 nv_mthd(priv, 0x9097, 0x1dfc, 0x00000000);
428 nv_mthd(priv, 0x9097, 0x1f00, 0x00000000);
429 nv_mthd(priv, 0x9097, 0x1f08, 0x00000000);
430 nv_mthd(priv, 0x9097, 0x1f10, 0x00000000);
431 nv_mthd(priv, 0x9097, 0x1f18, 0x00000000);
432 nv_mthd(priv, 0x9097, 0x1f20, 0x00000000);
433 nv_mthd(priv, 0x9097, 0x1f28, 0x00000000);
434 nv_mthd(priv, 0x9097, 0x1f30, 0x00000000);
435 nv_mthd(priv, 0x9097, 0x1f38, 0x00000000);
436 nv_mthd(priv, 0x9097, 0x1f40, 0x00000000);
437 nv_mthd(priv, 0x9097, 0x1f48, 0x00000000);
438 nv_mthd(priv, 0x9097, 0x1f50, 0x00000000);
439 nv_mthd(priv, 0x9097, 0x1f58, 0x00000000);
440 nv_mthd(priv, 0x9097, 0x1f60, 0x00000000);
441 nv_mthd(priv, 0x9097, 0x1f68, 0x00000000);
442 nv_mthd(priv, 0x9097, 0x1f70, 0x00000000);
443 nv_mthd(priv, 0x9097, 0x1f78, 0x00000000);
444 nv_mthd(priv, 0x9097, 0x1f04, 0x00000000);
445 nv_mthd(priv, 0x9097, 0x1f0c, 0x00000000);
446 nv_mthd(priv, 0x9097, 0x1f14, 0x00000000);
447 nv_mthd(priv, 0x9097, 0x1f1c, 0x00000000);
448 nv_mthd(priv, 0x9097, 0x1f24, 0x00000000);
449 nv_mthd(priv, 0x9097, 0x1f2c, 0x00000000);
450 nv_mthd(priv, 0x9097, 0x1f34, 0x00000000);
451 nv_mthd(priv, 0x9097, 0x1f3c, 0x00000000);
452 nv_mthd(priv, 0x9097, 0x1f44, 0x00000000);
453 nv_mthd(priv, 0x9097, 0x1f4c, 0x00000000);
454 nv_mthd(priv, 0x9097, 0x1f54, 0x00000000);
455 nv_mthd(priv, 0x9097, 0x1f5c, 0x00000000);
456 nv_mthd(priv, 0x9097, 0x1f64, 0x00000000);
457 nv_mthd(priv, 0x9097, 0x1f6c, 0x00000000);
458 nv_mthd(priv, 0x9097, 0x1f74, 0x00000000);
459 nv_mthd(priv, 0x9097, 0x1f7c, 0x00000000);
460 nv_mthd(priv, 0x9097, 0x1f80, 0x00000000);
461 nv_mthd(priv, 0x9097, 0x1f88, 0x00000000);
462 nv_mthd(priv, 0x9097, 0x1f90, 0x00000000);
463 nv_mthd(priv, 0x9097, 0x1f98, 0x00000000);
464 nv_mthd(priv, 0x9097, 0x1fa0, 0x00000000);
465 nv_mthd(priv, 0x9097, 0x1fa8, 0x00000000);
466 nv_mthd(priv, 0x9097, 0x1fb0, 0x00000000);
467 nv_mthd(priv, 0x9097, 0x1fb8, 0x00000000);
468 nv_mthd(priv, 0x9097, 0x1fc0, 0x00000000);
469 nv_mthd(priv, 0x9097, 0x1fc8, 0x00000000);
470 nv_mthd(priv, 0x9097, 0x1fd0, 0x00000000);
471 nv_mthd(priv, 0x9097, 0x1fd8, 0x00000000);
472 nv_mthd(priv, 0x9097, 0x1fe0, 0x00000000);
473 nv_mthd(priv, 0x9097, 0x1fe8, 0x00000000);
474 nv_mthd(priv, 0x9097, 0x1ff0, 0x00000000);
475 nv_mthd(priv, 0x9097, 0x1ff8, 0x00000000);
476 nv_mthd(priv, 0x9097, 0x1f84, 0x00000000);
477 nv_mthd(priv, 0x9097, 0x1f8c, 0x00000000);
478 nv_mthd(priv, 0x9097, 0x1f94, 0x00000000);
479 nv_mthd(priv, 0x9097, 0x1f9c, 0x00000000);
480 nv_mthd(priv, 0x9097, 0x1fa4, 0x00000000);
481 nv_mthd(priv, 0x9097, 0x1fac, 0x00000000);
482 nv_mthd(priv, 0x9097, 0x1fb4, 0x00000000);
483 nv_mthd(priv, 0x9097, 0x1fbc, 0x00000000);
484 nv_mthd(priv, 0x9097, 0x1fc4, 0x00000000);
485 nv_mthd(priv, 0x9097, 0x1fcc, 0x00000000);
486 nv_mthd(priv, 0x9097, 0x1fd4, 0x00000000);
487 nv_mthd(priv, 0x9097, 0x1fdc, 0x00000000);
488 nv_mthd(priv, 0x9097, 0x1fe4, 0x00000000);
489 nv_mthd(priv, 0x9097, 0x1fec, 0x00000000);
490 nv_mthd(priv, 0x9097, 0x1ff4, 0x00000000);
491 nv_mthd(priv, 0x9097, 0x1ffc, 0x00000000);
492 nv_mthd(priv, 0x9097, 0x2200, 0x00000022);
493 nv_mthd(priv, 0x9097, 0x2210, 0x00000022);
494 nv_mthd(priv, 0x9097, 0x2220, 0x00000022);
495 nv_mthd(priv, 0x9097, 0x2230, 0x00000022);
496 nv_mthd(priv, 0x9097, 0x2240, 0x00000022);
497 nv_mthd(priv, 0x9097, 0x2000, 0x00000000);
498 nv_mthd(priv, 0x9097, 0x2040, 0x00000011);
499 nv_mthd(priv, 0x9097, 0x2080, 0x00000020);
500 nv_mthd(priv, 0x9097, 0x20c0, 0x00000030);
501 nv_mthd(priv, 0x9097, 0x2100, 0x00000040);
502 nv_mthd(priv, 0x9097, 0x2140, 0x00000051);
503 nv_mthd(priv, 0x9097, 0x200c, 0x00000001);
504 nv_mthd(priv, 0x9097, 0x204c, 0x00000001);
505 nv_mthd(priv, 0x9097, 0x208c, 0x00000001);
506 nv_mthd(priv, 0x9097, 0x20cc, 0x00000001);
507 nv_mthd(priv, 0x9097, 0x210c, 0x00000001);
508 nv_mthd(priv, 0x9097, 0x214c, 0x00000001);
509 nv_mthd(priv, 0x9097, 0x2010, 0x00000000);
510 nv_mthd(priv, 0x9097, 0x2050, 0x00000000);
511 nv_mthd(priv, 0x9097, 0x2090, 0x00000001);
512 nv_mthd(priv, 0x9097, 0x20d0, 0x00000002);
513 nv_mthd(priv, 0x9097, 0x2110, 0x00000003);
514 nv_mthd(priv, 0x9097, 0x2150, 0x00000004);
515 nv_mthd(priv, 0x9097, 0x0380, 0x00000000);
516 nv_mthd(priv, 0x9097, 0x03a0, 0x00000000);
517 nv_mthd(priv, 0x9097, 0x03c0, 0x00000000);
518 nv_mthd(priv, 0x9097, 0x03e0, 0x00000000);
519 nv_mthd(priv, 0x9097, 0x0384, 0x00000000);
520 nv_mthd(priv, 0x9097, 0x03a4, 0x00000000);
521 nv_mthd(priv, 0x9097, 0x03c4, 0x00000000);
522 nv_mthd(priv, 0x9097, 0x03e4, 0x00000000);
523 nv_mthd(priv, 0x9097, 0x0388, 0x00000000);
524 nv_mthd(priv, 0x9097, 0x03a8, 0x00000000);
525 nv_mthd(priv, 0x9097, 0x03c8, 0x00000000);
526 nv_mthd(priv, 0x9097, 0x03e8, 0x00000000);
527 nv_mthd(priv, 0x9097, 0x038c, 0x00000000);
528 nv_mthd(priv, 0x9097, 0x03ac, 0x00000000);
529 nv_mthd(priv, 0x9097, 0x03cc, 0x00000000);
530 nv_mthd(priv, 0x9097, 0x03ec, 0x00000000);
531 nv_mthd(priv, 0x9097, 0x0700, 0x00000000);
532 nv_mthd(priv, 0x9097, 0x0710, 0x00000000);
533 nv_mthd(priv, 0x9097, 0x0720, 0x00000000);
534 nv_mthd(priv, 0x9097, 0x0730, 0x00000000);
535 nv_mthd(priv, 0x9097, 0x0704, 0x00000000);
536 nv_mthd(priv, 0x9097, 0x0714, 0x00000000);
537 nv_mthd(priv, 0x9097, 0x0724, 0x00000000);
538 nv_mthd(priv, 0x9097, 0x0734, 0x00000000);
539 nv_mthd(priv, 0x9097, 0x0708, 0x00000000);
540 nv_mthd(priv, 0x9097, 0x0718, 0x00000000);
541 nv_mthd(priv, 0x9097, 0x0728, 0x00000000);
542 nv_mthd(priv, 0x9097, 0x0738, 0x00000000);
543 nv_mthd(priv, 0x9097, 0x2800, 0x00000000);
544 nv_mthd(priv, 0x9097, 0x2804, 0x00000000);
545 nv_mthd(priv, 0x9097, 0x2808, 0x00000000);
546 nv_mthd(priv, 0x9097, 0x280c, 0x00000000);
547 nv_mthd(priv, 0x9097, 0x2810, 0x00000000);
548 nv_mthd(priv, 0x9097, 0x2814, 0x00000000);
549 nv_mthd(priv, 0x9097, 0x2818, 0x00000000);
550 nv_mthd(priv, 0x9097, 0x281c, 0x00000000);
551 nv_mthd(priv, 0x9097, 0x2820, 0x00000000);
552 nv_mthd(priv, 0x9097, 0x2824, 0x00000000);
553 nv_mthd(priv, 0x9097, 0x2828, 0x00000000);
554 nv_mthd(priv, 0x9097, 0x282c, 0x00000000);
555 nv_mthd(priv, 0x9097, 0x2830, 0x00000000);
556 nv_mthd(priv, 0x9097, 0x2834, 0x00000000);
557 nv_mthd(priv, 0x9097, 0x2838, 0x00000000);
558 nv_mthd(priv, 0x9097, 0x283c, 0x00000000);
559 nv_mthd(priv, 0x9097, 0x2840, 0x00000000);
560 nv_mthd(priv, 0x9097, 0x2844, 0x00000000);
561 nv_mthd(priv, 0x9097, 0x2848, 0x00000000);
562 nv_mthd(priv, 0x9097, 0x284c, 0x00000000);
563 nv_mthd(priv, 0x9097, 0x2850, 0x00000000);
564 nv_mthd(priv, 0x9097, 0x2854, 0x00000000);
565 nv_mthd(priv, 0x9097, 0x2858, 0x00000000);
566 nv_mthd(priv, 0x9097, 0x285c, 0x00000000);
567 nv_mthd(priv, 0x9097, 0x2860, 0x00000000);
568 nv_mthd(priv, 0x9097, 0x2864, 0x00000000);
569 nv_mthd(priv, 0x9097, 0x2868, 0x00000000);
570 nv_mthd(priv, 0x9097, 0x286c, 0x00000000);
571 nv_mthd(priv, 0x9097, 0x2870, 0x00000000);
572 nv_mthd(priv, 0x9097, 0x2874, 0x00000000);
573 nv_mthd(priv, 0x9097, 0x2878, 0x00000000);
574 nv_mthd(priv, 0x9097, 0x287c, 0x00000000);
575 nv_mthd(priv, 0x9097, 0x2880, 0x00000000);
576 nv_mthd(priv, 0x9097, 0x2884, 0x00000000);
577 nv_mthd(priv, 0x9097, 0x2888, 0x00000000);
578 nv_mthd(priv, 0x9097, 0x288c, 0x00000000);
579 nv_mthd(priv, 0x9097, 0x2890, 0x00000000);
580 nv_mthd(priv, 0x9097, 0x2894, 0x00000000);
581 nv_mthd(priv, 0x9097, 0x2898, 0x00000000);
582 nv_mthd(priv, 0x9097, 0x289c, 0x00000000);
583 nv_mthd(priv, 0x9097, 0x28a0, 0x00000000);
584 nv_mthd(priv, 0x9097, 0x28a4, 0x00000000);
585 nv_mthd(priv, 0x9097, 0x28a8, 0x00000000);
586 nv_mthd(priv, 0x9097, 0x28ac, 0x00000000);
587 nv_mthd(priv, 0x9097, 0x28b0, 0x00000000);
588 nv_mthd(priv, 0x9097, 0x28b4, 0x00000000);
589 nv_mthd(priv, 0x9097, 0x28b8, 0x00000000);
590 nv_mthd(priv, 0x9097, 0x28bc, 0x00000000);
591 nv_mthd(priv, 0x9097, 0x28c0, 0x00000000);
592 nv_mthd(priv, 0x9097, 0x28c4, 0x00000000);
593 nv_mthd(priv, 0x9097, 0x28c8, 0x00000000);
594 nv_mthd(priv, 0x9097, 0x28cc, 0x00000000);
595 nv_mthd(priv, 0x9097, 0x28d0, 0x00000000);
596 nv_mthd(priv, 0x9097, 0x28d4, 0x00000000);
597 nv_mthd(priv, 0x9097, 0x28d8, 0x00000000);
598 nv_mthd(priv, 0x9097, 0x28dc, 0x00000000);
599 nv_mthd(priv, 0x9097, 0x28e0, 0x00000000);
600 nv_mthd(priv, 0x9097, 0x28e4, 0x00000000);
601 nv_mthd(priv, 0x9097, 0x28e8, 0x00000000);
602 nv_mthd(priv, 0x9097, 0x28ec, 0x00000000);
603 nv_mthd(priv, 0x9097, 0x28f0, 0x00000000);
604 nv_mthd(priv, 0x9097, 0x28f4, 0x00000000);
605 nv_mthd(priv, 0x9097, 0x28f8, 0x00000000);
606 nv_mthd(priv, 0x9097, 0x28fc, 0x00000000);
607 nv_mthd(priv, 0x9097, 0x2900, 0x00000000);
608 nv_mthd(priv, 0x9097, 0x2904, 0x00000000);
609 nv_mthd(priv, 0x9097, 0x2908, 0x00000000);
610 nv_mthd(priv, 0x9097, 0x290c, 0x00000000);
611 nv_mthd(priv, 0x9097, 0x2910, 0x00000000);
612 nv_mthd(priv, 0x9097, 0x2914, 0x00000000);
613 nv_mthd(priv, 0x9097, 0x2918, 0x00000000);
614 nv_mthd(priv, 0x9097, 0x291c, 0x00000000);
615 nv_mthd(priv, 0x9097, 0x2920, 0x00000000);
616 nv_mthd(priv, 0x9097, 0x2924, 0x00000000);
617 nv_mthd(priv, 0x9097, 0x2928, 0x00000000);
618 nv_mthd(priv, 0x9097, 0x292c, 0x00000000);
619 nv_mthd(priv, 0x9097, 0x2930, 0x00000000);
620 nv_mthd(priv, 0x9097, 0x2934, 0x00000000);
621 nv_mthd(priv, 0x9097, 0x2938, 0x00000000);
622 nv_mthd(priv, 0x9097, 0x293c, 0x00000000);
623 nv_mthd(priv, 0x9097, 0x2940, 0x00000000);
624 nv_mthd(priv, 0x9097, 0x2944, 0x00000000);
625 nv_mthd(priv, 0x9097, 0x2948, 0x00000000);
626 nv_mthd(priv, 0x9097, 0x294c, 0x00000000);
627 nv_mthd(priv, 0x9097, 0x2950, 0x00000000);
628 nv_mthd(priv, 0x9097, 0x2954, 0x00000000);
629 nv_mthd(priv, 0x9097, 0x2958, 0x00000000);
630 nv_mthd(priv, 0x9097, 0x295c, 0x00000000);
631 nv_mthd(priv, 0x9097, 0x2960, 0x00000000);
632 nv_mthd(priv, 0x9097, 0x2964, 0x00000000);
633 nv_mthd(priv, 0x9097, 0x2968, 0x00000000);
634 nv_mthd(priv, 0x9097, 0x296c, 0x00000000);
635 nv_mthd(priv, 0x9097, 0x2970, 0x00000000);
636 nv_mthd(priv, 0x9097, 0x2974, 0x00000000);
637 nv_mthd(priv, 0x9097, 0x2978, 0x00000000);
638 nv_mthd(priv, 0x9097, 0x297c, 0x00000000);
639 nv_mthd(priv, 0x9097, 0x2980, 0x00000000);
640 nv_mthd(priv, 0x9097, 0x2984, 0x00000000);
641 nv_mthd(priv, 0x9097, 0x2988, 0x00000000);
642 nv_mthd(priv, 0x9097, 0x298c, 0x00000000);
643 nv_mthd(priv, 0x9097, 0x2990, 0x00000000);
644 nv_mthd(priv, 0x9097, 0x2994, 0x00000000);
645 nv_mthd(priv, 0x9097, 0x2998, 0x00000000);
646 nv_mthd(priv, 0x9097, 0x299c, 0x00000000);
647 nv_mthd(priv, 0x9097, 0x29a0, 0x00000000);
648 nv_mthd(priv, 0x9097, 0x29a4, 0x00000000);
649 nv_mthd(priv, 0x9097, 0x29a8, 0x00000000);
650 nv_mthd(priv, 0x9097, 0x29ac, 0x00000000);
651 nv_mthd(priv, 0x9097, 0x29b0, 0x00000000);
652 nv_mthd(priv, 0x9097, 0x29b4, 0x00000000);
653 nv_mthd(priv, 0x9097, 0x29b8, 0x00000000);
654 nv_mthd(priv, 0x9097, 0x29bc, 0x00000000);
655 nv_mthd(priv, 0x9097, 0x29c0, 0x00000000);
656 nv_mthd(priv, 0x9097, 0x29c4, 0x00000000);
657 nv_mthd(priv, 0x9097, 0x29c8, 0x00000000);
658 nv_mthd(priv, 0x9097, 0x29cc, 0x00000000);
659 nv_mthd(priv, 0x9097, 0x29d0, 0x00000000);
660 nv_mthd(priv, 0x9097, 0x29d4, 0x00000000);
661 nv_mthd(priv, 0x9097, 0x29d8, 0x00000000);
662 nv_mthd(priv, 0x9097, 0x29dc, 0x00000000);
663 nv_mthd(priv, 0x9097, 0x29e0, 0x00000000);
664 nv_mthd(priv, 0x9097, 0x29e4, 0x00000000);
665 nv_mthd(priv, 0x9097, 0x29e8, 0x00000000);
666 nv_mthd(priv, 0x9097, 0x29ec, 0x00000000);
667 nv_mthd(priv, 0x9097, 0x29f0, 0x00000000);
668 nv_mthd(priv, 0x9097, 0x29f4, 0x00000000);
669 nv_mthd(priv, 0x9097, 0x29f8, 0x00000000);
670 nv_mthd(priv, 0x9097, 0x29fc, 0x00000000);
671 nv_mthd(priv, 0x9097, 0x0a00, 0x00000000);
672 nv_mthd(priv, 0x9097, 0x0a20, 0x00000000);
673 nv_mthd(priv, 0x9097, 0x0a40, 0x00000000);
674 nv_mthd(priv, 0x9097, 0x0a60, 0x00000000);
675 nv_mthd(priv, 0x9097, 0x0a80, 0x00000000);
676 nv_mthd(priv, 0x9097, 0x0aa0, 0x00000000);
677 nv_mthd(priv, 0x9097, 0x0ac0, 0x00000000);
678 nv_mthd(priv, 0x9097, 0x0ae0, 0x00000000);
679 nv_mthd(priv, 0x9097, 0x0b00, 0x00000000);
680 nv_mthd(priv, 0x9097, 0x0b20, 0x00000000);
681 nv_mthd(priv, 0x9097, 0x0b40, 0x00000000);
682 nv_mthd(priv, 0x9097, 0x0b60, 0x00000000);
683 nv_mthd(priv, 0x9097, 0x0b80, 0x00000000);
684 nv_mthd(priv, 0x9097, 0x0ba0, 0x00000000);
685 nv_mthd(priv, 0x9097, 0x0bc0, 0x00000000);
686 nv_mthd(priv, 0x9097, 0x0be0, 0x00000000);
687 nv_mthd(priv, 0x9097, 0x0a04, 0x00000000);
688 nv_mthd(priv, 0x9097, 0x0a24, 0x00000000);
689 nv_mthd(priv, 0x9097, 0x0a44, 0x00000000);
690 nv_mthd(priv, 0x9097, 0x0a64, 0x00000000);
691 nv_mthd(priv, 0x9097, 0x0a84, 0x00000000);
692 nv_mthd(priv, 0x9097, 0x0aa4, 0x00000000);
693 nv_mthd(priv, 0x9097, 0x0ac4, 0x00000000);
694 nv_mthd(priv, 0x9097, 0x0ae4, 0x00000000);
695 nv_mthd(priv, 0x9097, 0x0b04, 0x00000000);
696 nv_mthd(priv, 0x9097, 0x0b24, 0x00000000);
697 nv_mthd(priv, 0x9097, 0x0b44, 0x00000000);
698 nv_mthd(priv, 0x9097, 0x0b64, 0x00000000);
699 nv_mthd(priv, 0x9097, 0x0b84, 0x00000000);
700 nv_mthd(priv, 0x9097, 0x0ba4, 0x00000000);
701 nv_mthd(priv, 0x9097, 0x0bc4, 0x00000000);
702 nv_mthd(priv, 0x9097, 0x0be4, 0x00000000);
703 nv_mthd(priv, 0x9097, 0x0a08, 0x00000000);
704 nv_mthd(priv, 0x9097, 0x0a28, 0x00000000);
705 nv_mthd(priv, 0x9097, 0x0a48, 0x00000000);
706 nv_mthd(priv, 0x9097, 0x0a68, 0x00000000);
707 nv_mthd(priv, 0x9097, 0x0a88, 0x00000000);
708 nv_mthd(priv, 0x9097, 0x0aa8, 0x00000000);
709 nv_mthd(priv, 0x9097, 0x0ac8, 0x00000000);
710 nv_mthd(priv, 0x9097, 0x0ae8, 0x00000000);
711 nv_mthd(priv, 0x9097, 0x0b08, 0x00000000);
712 nv_mthd(priv, 0x9097, 0x0b28, 0x00000000);
713 nv_mthd(priv, 0x9097, 0x0b48, 0x00000000);
714 nv_mthd(priv, 0x9097, 0x0b68, 0x00000000);
715 nv_mthd(priv, 0x9097, 0x0b88, 0x00000000);
716 nv_mthd(priv, 0x9097, 0x0ba8, 0x00000000);
717 nv_mthd(priv, 0x9097, 0x0bc8, 0x00000000);
718 nv_mthd(priv, 0x9097, 0x0be8, 0x00000000);
719 nv_mthd(priv, 0x9097, 0x0a0c, 0x00000000);
720 nv_mthd(priv, 0x9097, 0x0a2c, 0x00000000);
721 nv_mthd(priv, 0x9097, 0x0a4c, 0x00000000);
722 nv_mthd(priv, 0x9097, 0x0a6c, 0x00000000);
723 nv_mthd(priv, 0x9097, 0x0a8c, 0x00000000);
724 nv_mthd(priv, 0x9097, 0x0aac, 0x00000000);
725 nv_mthd(priv, 0x9097, 0x0acc, 0x00000000);
726 nv_mthd(priv, 0x9097, 0x0aec, 0x00000000);
727 nv_mthd(priv, 0x9097, 0x0b0c, 0x00000000);
728 nv_mthd(priv, 0x9097, 0x0b2c, 0x00000000);
729 nv_mthd(priv, 0x9097, 0x0b4c, 0x00000000);
730 nv_mthd(priv, 0x9097, 0x0b6c, 0x00000000);
731 nv_mthd(priv, 0x9097, 0x0b8c, 0x00000000);
732 nv_mthd(priv, 0x9097, 0x0bac, 0x00000000);
733 nv_mthd(priv, 0x9097, 0x0bcc, 0x00000000);
734 nv_mthd(priv, 0x9097, 0x0bec, 0x00000000);
735 nv_mthd(priv, 0x9097, 0x0a10, 0x00000000);
736 nv_mthd(priv, 0x9097, 0x0a30, 0x00000000);
737 nv_mthd(priv, 0x9097, 0x0a50, 0x00000000);
738 nv_mthd(priv, 0x9097, 0x0a70, 0x00000000);
739 nv_mthd(priv, 0x9097, 0x0a90, 0x00000000);
740 nv_mthd(priv, 0x9097, 0x0ab0, 0x00000000);
741 nv_mthd(priv, 0x9097, 0x0ad0, 0x00000000);
742 nv_mthd(priv, 0x9097, 0x0af0, 0x00000000);
743 nv_mthd(priv, 0x9097, 0x0b10, 0x00000000);
744 nv_mthd(priv, 0x9097, 0x0b30, 0x00000000);
745 nv_mthd(priv, 0x9097, 0x0b50, 0x00000000);
746 nv_mthd(priv, 0x9097, 0x0b70, 0x00000000);
747 nv_mthd(priv, 0x9097, 0x0b90, 0x00000000);
748 nv_mthd(priv, 0x9097, 0x0bb0, 0x00000000);
749 nv_mthd(priv, 0x9097, 0x0bd0, 0x00000000);
750 nv_mthd(priv, 0x9097, 0x0bf0, 0x00000000);
751 nv_mthd(priv, 0x9097, 0x0a14, 0x00000000);
752 nv_mthd(priv, 0x9097, 0x0a34, 0x00000000);
753 nv_mthd(priv, 0x9097, 0x0a54, 0x00000000);
754 nv_mthd(priv, 0x9097, 0x0a74, 0x00000000);
755 nv_mthd(priv, 0x9097, 0x0a94, 0x00000000);
756 nv_mthd(priv, 0x9097, 0x0ab4, 0x00000000);
757 nv_mthd(priv, 0x9097, 0x0ad4, 0x00000000);
758 nv_mthd(priv, 0x9097, 0x0af4, 0x00000000);
759 nv_mthd(priv, 0x9097, 0x0b14, 0x00000000);
760 nv_mthd(priv, 0x9097, 0x0b34, 0x00000000);
761 nv_mthd(priv, 0x9097, 0x0b54, 0x00000000);
762 nv_mthd(priv, 0x9097, 0x0b74, 0x00000000);
763 nv_mthd(priv, 0x9097, 0x0b94, 0x00000000);
764 nv_mthd(priv, 0x9097, 0x0bb4, 0x00000000);
765 nv_mthd(priv, 0x9097, 0x0bd4, 0x00000000);
766 nv_mthd(priv, 0x9097, 0x0bf4, 0x00000000);
767 nv_mthd(priv, 0x9097, 0x0c00, 0x00000000);
768 nv_mthd(priv, 0x9097, 0x0c10, 0x00000000);
769 nv_mthd(priv, 0x9097, 0x0c20, 0x00000000);
770 nv_mthd(priv, 0x9097, 0x0c30, 0x00000000);
771 nv_mthd(priv, 0x9097, 0x0c40, 0x00000000);
772 nv_mthd(priv, 0x9097, 0x0c50, 0x00000000);
773 nv_mthd(priv, 0x9097, 0x0c60, 0x00000000);
774 nv_mthd(priv, 0x9097, 0x0c70, 0x00000000);
775 nv_mthd(priv, 0x9097, 0x0c80, 0x00000000);
776 nv_mthd(priv, 0x9097, 0x0c90, 0x00000000);
777 nv_mthd(priv, 0x9097, 0x0ca0, 0x00000000);
778 nv_mthd(priv, 0x9097, 0x0cb0, 0x00000000);
779 nv_mthd(priv, 0x9097, 0x0cc0, 0x00000000);
780 nv_mthd(priv, 0x9097, 0x0cd0, 0x00000000);
781 nv_mthd(priv, 0x9097, 0x0ce0, 0x00000000);
782 nv_mthd(priv, 0x9097, 0x0cf0, 0x00000000);
783 nv_mthd(priv, 0x9097, 0x0c04, 0x00000000);
784 nv_mthd(priv, 0x9097, 0x0c14, 0x00000000);
785 nv_mthd(priv, 0x9097, 0x0c24, 0x00000000);
786 nv_mthd(priv, 0x9097, 0x0c34, 0x00000000);
787 nv_mthd(priv, 0x9097, 0x0c44, 0x00000000);
788 nv_mthd(priv, 0x9097, 0x0c54, 0x00000000);
789 nv_mthd(priv, 0x9097, 0x0c64, 0x00000000);
790 nv_mthd(priv, 0x9097, 0x0c74, 0x00000000);
791 nv_mthd(priv, 0x9097, 0x0c84, 0x00000000);
792 nv_mthd(priv, 0x9097, 0x0c94, 0x00000000);
793 nv_mthd(priv, 0x9097, 0x0ca4, 0x00000000);
794 nv_mthd(priv, 0x9097, 0x0cb4, 0x00000000);
795 nv_mthd(priv, 0x9097, 0x0cc4, 0x00000000);
796 nv_mthd(priv, 0x9097, 0x0cd4, 0x00000000);
797 nv_mthd(priv, 0x9097, 0x0ce4, 0x00000000);
798 nv_mthd(priv, 0x9097, 0x0cf4, 0x00000000);
799 nv_mthd(priv, 0x9097, 0x0c08, 0x00000000);
800 nv_mthd(priv, 0x9097, 0x0c18, 0x00000000);
801 nv_mthd(priv, 0x9097, 0x0c28, 0x00000000);
802 nv_mthd(priv, 0x9097, 0x0c38, 0x00000000);
803 nv_mthd(priv, 0x9097, 0x0c48, 0x00000000);
804 nv_mthd(priv, 0x9097, 0x0c58, 0x00000000);
805 nv_mthd(priv, 0x9097, 0x0c68, 0x00000000);
806 nv_mthd(priv, 0x9097, 0x0c78, 0x00000000);
807 nv_mthd(priv, 0x9097, 0x0c88, 0x00000000);
808 nv_mthd(priv, 0x9097, 0x0c98, 0x00000000);
809 nv_mthd(priv, 0x9097, 0x0ca8, 0x00000000);
810 nv_mthd(priv, 0x9097, 0x0cb8, 0x00000000);
811 nv_mthd(priv, 0x9097, 0x0cc8, 0x00000000);
812 nv_mthd(priv, 0x9097, 0x0cd8, 0x00000000);
813 nv_mthd(priv, 0x9097, 0x0ce8, 0x00000000);
814 nv_mthd(priv, 0x9097, 0x0cf8, 0x00000000);
815 nv_mthd(priv, 0x9097, 0x0c0c, 0x3f800000);
816 nv_mthd(priv, 0x9097, 0x0c1c, 0x3f800000);
817 nv_mthd(priv, 0x9097, 0x0c2c, 0x3f800000);
818 nv_mthd(priv, 0x9097, 0x0c3c, 0x3f800000);
819 nv_mthd(priv, 0x9097, 0x0c4c, 0x3f800000);
820 nv_mthd(priv, 0x9097, 0x0c5c, 0x3f800000);
821 nv_mthd(priv, 0x9097, 0x0c6c, 0x3f800000);
822 nv_mthd(priv, 0x9097, 0x0c7c, 0x3f800000);
823 nv_mthd(priv, 0x9097, 0x0c8c, 0x3f800000);
824 nv_mthd(priv, 0x9097, 0x0c9c, 0x3f800000);
825 nv_mthd(priv, 0x9097, 0x0cac, 0x3f800000);
826 nv_mthd(priv, 0x9097, 0x0cbc, 0x3f800000);
827 nv_mthd(priv, 0x9097, 0x0ccc, 0x3f800000);
828 nv_mthd(priv, 0x9097, 0x0cdc, 0x3f800000);
829 nv_mthd(priv, 0x9097, 0x0cec, 0x3f800000);
830 nv_mthd(priv, 0x9097, 0x0cfc, 0x3f800000);
831 nv_mthd(priv, 0x9097, 0x0d00, 0xffff0000);
832 nv_mthd(priv, 0x9097, 0x0d08, 0xffff0000);
833 nv_mthd(priv, 0x9097, 0x0d10, 0xffff0000);
834 nv_mthd(priv, 0x9097, 0x0d18, 0xffff0000);
835 nv_mthd(priv, 0x9097, 0x0d20, 0xffff0000);
836 nv_mthd(priv, 0x9097, 0x0d28, 0xffff0000);
837 nv_mthd(priv, 0x9097, 0x0d30, 0xffff0000);
838 nv_mthd(priv, 0x9097, 0x0d38, 0xffff0000);
839 nv_mthd(priv, 0x9097, 0x0d04, 0xffff0000);
840 nv_mthd(priv, 0x9097, 0x0d0c, 0xffff0000);
841 nv_mthd(priv, 0x9097, 0x0d14, 0xffff0000);
842 nv_mthd(priv, 0x9097, 0x0d1c, 0xffff0000);
843 nv_mthd(priv, 0x9097, 0x0d24, 0xffff0000);
844 nv_mthd(priv, 0x9097, 0x0d2c, 0xffff0000);
845 nv_mthd(priv, 0x9097, 0x0d34, 0xffff0000);
846 nv_mthd(priv, 0x9097, 0x0d3c, 0xffff0000);
847 nv_mthd(priv, 0x9097, 0x0e00, 0x00000000);
848 nv_mthd(priv, 0x9097, 0x0e10, 0x00000000);
849 nv_mthd(priv, 0x9097, 0x0e20, 0x00000000);
850 nv_mthd(priv, 0x9097, 0x0e30, 0x00000000);
851 nv_mthd(priv, 0x9097, 0x0e40, 0x00000000);
852 nv_mthd(priv, 0x9097, 0x0e50, 0x00000000);
853 nv_mthd(priv, 0x9097, 0x0e60, 0x00000000);
854 nv_mthd(priv, 0x9097, 0x0e70, 0x00000000);
855 nv_mthd(priv, 0x9097, 0x0e80, 0x00000000);
856 nv_mthd(priv, 0x9097, 0x0e90, 0x00000000);
857 nv_mthd(priv, 0x9097, 0x0ea0, 0x00000000);
858 nv_mthd(priv, 0x9097, 0x0eb0, 0x00000000);
859 nv_mthd(priv, 0x9097, 0x0ec0, 0x00000000);
860 nv_mthd(priv, 0x9097, 0x0ed0, 0x00000000);
861 nv_mthd(priv, 0x9097, 0x0ee0, 0x00000000);
862 nv_mthd(priv, 0x9097, 0x0ef0, 0x00000000);
863 nv_mthd(priv, 0x9097, 0x0e04, 0xffff0000);
864 nv_mthd(priv, 0x9097, 0x0e14, 0xffff0000);
865 nv_mthd(priv, 0x9097, 0x0e24, 0xffff0000);
866 nv_mthd(priv, 0x9097, 0x0e34, 0xffff0000);
867 nv_mthd(priv, 0x9097, 0x0e44, 0xffff0000);
868 nv_mthd(priv, 0x9097, 0x0e54, 0xffff0000);
869 nv_mthd(priv, 0x9097, 0x0e64, 0xffff0000);
870 nv_mthd(priv, 0x9097, 0x0e74, 0xffff0000);
871 nv_mthd(priv, 0x9097, 0x0e84, 0xffff0000);
872 nv_mthd(priv, 0x9097, 0x0e94, 0xffff0000);
873 nv_mthd(priv, 0x9097, 0x0ea4, 0xffff0000);
874 nv_mthd(priv, 0x9097, 0x0eb4, 0xffff0000);
875 nv_mthd(priv, 0x9097, 0x0ec4, 0xffff0000);
876 nv_mthd(priv, 0x9097, 0x0ed4, 0xffff0000);
877 nv_mthd(priv, 0x9097, 0x0ee4, 0xffff0000);
878 nv_mthd(priv, 0x9097, 0x0ef4, 0xffff0000);
879 nv_mthd(priv, 0x9097, 0x0e08, 0xffff0000);
880 nv_mthd(priv, 0x9097, 0x0e18, 0xffff0000);
881 nv_mthd(priv, 0x9097, 0x0e28, 0xffff0000);
882 nv_mthd(priv, 0x9097, 0x0e38, 0xffff0000);
883 nv_mthd(priv, 0x9097, 0x0e48, 0xffff0000);
884 nv_mthd(priv, 0x9097, 0x0e58, 0xffff0000);
885 nv_mthd(priv, 0x9097, 0x0e68, 0xffff0000);
886 nv_mthd(priv, 0x9097, 0x0e78, 0xffff0000);
887 nv_mthd(priv, 0x9097, 0x0e88, 0xffff0000);
888 nv_mthd(priv, 0x9097, 0x0e98, 0xffff0000);
889 nv_mthd(priv, 0x9097, 0x0ea8, 0xffff0000);
890 nv_mthd(priv, 0x9097, 0x0eb8, 0xffff0000);
891 nv_mthd(priv, 0x9097, 0x0ec8, 0xffff0000);
892 nv_mthd(priv, 0x9097, 0x0ed8, 0xffff0000);
893 nv_mthd(priv, 0x9097, 0x0ee8, 0xffff0000);
894 nv_mthd(priv, 0x9097, 0x0ef8, 0xffff0000);
895 nv_mthd(priv, 0x9097, 0x0d40, 0x00000000);
896 nv_mthd(priv, 0x9097, 0x0d48, 0x00000000);
897 nv_mthd(priv, 0x9097, 0x0d50, 0x00000000);
898 nv_mthd(priv, 0x9097, 0x0d58, 0x00000000);
899 nv_mthd(priv, 0x9097, 0x0d44, 0x00000000);
900 nv_mthd(priv, 0x9097, 0x0d4c, 0x00000000);
901 nv_mthd(priv, 0x9097, 0x0d54, 0x00000000);
902 nv_mthd(priv, 0x9097, 0x0d5c, 0x00000000);
903 nv_mthd(priv, 0x9097, 0x1e00, 0x00000001);
904 nv_mthd(priv, 0x9097, 0x1e20, 0x00000001);
905 nv_mthd(priv, 0x9097, 0x1e40, 0x00000001);
906 nv_mthd(priv, 0x9097, 0x1e60, 0x00000001);
907 nv_mthd(priv, 0x9097, 0x1e80, 0x00000001);
908 nv_mthd(priv, 0x9097, 0x1ea0, 0x00000001);
909 nv_mthd(priv, 0x9097, 0x1ec0, 0x00000001);
910 nv_mthd(priv, 0x9097, 0x1ee0, 0x00000001);
911 nv_mthd(priv, 0x9097, 0x1e04, 0x00000001);
912 nv_mthd(priv, 0x9097, 0x1e24, 0x00000001);
913 nv_mthd(priv, 0x9097, 0x1e44, 0x00000001);
914 nv_mthd(priv, 0x9097, 0x1e64, 0x00000001);
915 nv_mthd(priv, 0x9097, 0x1e84, 0x00000001);
916 nv_mthd(priv, 0x9097, 0x1ea4, 0x00000001);
917 nv_mthd(priv, 0x9097, 0x1ec4, 0x00000001);
918 nv_mthd(priv, 0x9097, 0x1ee4, 0x00000001);
919 nv_mthd(priv, 0x9097, 0x1e08, 0x00000002);
920 nv_mthd(priv, 0x9097, 0x1e28, 0x00000002);
921 nv_mthd(priv, 0x9097, 0x1e48, 0x00000002);
922 nv_mthd(priv, 0x9097, 0x1e68, 0x00000002);
923 nv_mthd(priv, 0x9097, 0x1e88, 0x00000002);
924 nv_mthd(priv, 0x9097, 0x1ea8, 0x00000002);
925 nv_mthd(priv, 0x9097, 0x1ec8, 0x00000002);
926 nv_mthd(priv, 0x9097, 0x1ee8, 0x00000002);
927 nv_mthd(priv, 0x9097, 0x1e0c, 0x00000001);
928 nv_mthd(priv, 0x9097, 0x1e2c, 0x00000001);
929 nv_mthd(priv, 0x9097, 0x1e4c, 0x00000001);
930 nv_mthd(priv, 0x9097, 0x1e6c, 0x00000001);
931 nv_mthd(priv, 0x9097, 0x1e8c, 0x00000001);
932 nv_mthd(priv, 0x9097, 0x1eac, 0x00000001);
933 nv_mthd(priv, 0x9097, 0x1ecc, 0x00000001);
934 nv_mthd(priv, 0x9097, 0x1eec, 0x00000001);
935 nv_mthd(priv, 0x9097, 0x1e10, 0x00000001);
936 nv_mthd(priv, 0x9097, 0x1e30, 0x00000001);
937 nv_mthd(priv, 0x9097, 0x1e50, 0x00000001);
938 nv_mthd(priv, 0x9097, 0x1e70, 0x00000001);
939 nv_mthd(priv, 0x9097, 0x1e90, 0x00000001);
940 nv_mthd(priv, 0x9097, 0x1eb0, 0x00000001);
941 nv_mthd(priv, 0x9097, 0x1ed0, 0x00000001);
942 nv_mthd(priv, 0x9097, 0x1ef0, 0x00000001);
943 nv_mthd(priv, 0x9097, 0x1e14, 0x00000002);
944 nv_mthd(priv, 0x9097, 0x1e34, 0x00000002);
945 nv_mthd(priv, 0x9097, 0x1e54, 0x00000002);
946 nv_mthd(priv, 0x9097, 0x1e74, 0x00000002);
947 nv_mthd(priv, 0x9097, 0x1e94, 0x00000002);
948 nv_mthd(priv, 0x9097, 0x1eb4, 0x00000002);
949 nv_mthd(priv, 0x9097, 0x1ed4, 0x00000002);
950 nv_mthd(priv, 0x9097, 0x1ef4, 0x00000002);
951 nv_mthd(priv, 0x9097, 0x1e18, 0x00000001);
952 nv_mthd(priv, 0x9097, 0x1e38, 0x00000001);
953 nv_mthd(priv, 0x9097, 0x1e58, 0x00000001);
954 nv_mthd(priv, 0x9097, 0x1e78, 0x00000001);
955 nv_mthd(priv, 0x9097, 0x1e98, 0x00000001);
956 nv_mthd(priv, 0x9097, 0x1eb8, 0x00000001);
957 nv_mthd(priv, 0x9097, 0x1ed8, 0x00000001);
958 nv_mthd(priv, 0x9097, 0x1ef8, 0x00000001);
959 if (fermi == 0x9097) {
960 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
961 nv_mthd(priv, 0x9097, mthd, 0x00000000);
962 }
963 nv_mthd(priv, 0x9097, 0x030c, 0x00000001);
964 nv_mthd(priv, 0x9097, 0x1944, 0x00000000);
965 nv_mthd(priv, 0x9097, 0x1514, 0x00000000);
966 nv_mthd(priv, 0x9097, 0x0d68, 0x0000ffff);
967 nv_mthd(priv, 0x9097, 0x121c, 0x0fac6881);
968 nv_mthd(priv, 0x9097, 0x0fac, 0x00000001);
969 nv_mthd(priv, 0x9097, 0x1538, 0x00000001);
970 nv_mthd(priv, 0x9097, 0x0fe0, 0x00000000);
971 nv_mthd(priv, 0x9097, 0x0fe4, 0x00000000);
972 nv_mthd(priv, 0x9097, 0x0fe8, 0x00000014);
973 nv_mthd(priv, 0x9097, 0x0fec, 0x00000040);
974 nv_mthd(priv, 0x9097, 0x0ff0, 0x00000000);
975 nv_mthd(priv, 0x9097, 0x179c, 0x00000000);
976 nv_mthd(priv, 0x9097, 0x1228, 0x00000400);
977 nv_mthd(priv, 0x9097, 0x122c, 0x00000300);
978 nv_mthd(priv, 0x9097, 0x1230, 0x00010001);
979 nv_mthd(priv, 0x9097, 0x07f8, 0x00000000);
980 nv_mthd(priv, 0x9097, 0x15b4, 0x00000001);
981 nv_mthd(priv, 0x9097, 0x15cc, 0x00000000);
982 nv_mthd(priv, 0x9097, 0x1534, 0x00000000);
983 nv_mthd(priv, 0x9097, 0x0fb0, 0x00000000);
984 nv_mthd(priv, 0x9097, 0x15d0, 0x00000000);
985 nv_mthd(priv, 0x9097, 0x153c, 0x00000000);
986 nv_mthd(priv, 0x9097, 0x16b4, 0x00000003);
987 nv_mthd(priv, 0x9097, 0x0fbc, 0x0000ffff);
988 nv_mthd(priv, 0x9097, 0x0fc0, 0x0000ffff);
989 nv_mthd(priv, 0x9097, 0x0fc4, 0x0000ffff);
990 nv_mthd(priv, 0x9097, 0x0fc8, 0x0000ffff);
991 nv_mthd(priv, 0x9097, 0x0df8, 0x00000000);
992 nv_mthd(priv, 0x9097, 0x0dfc, 0x00000000);
993 nv_mthd(priv, 0x9097, 0x1948, 0x00000000);
994 nv_mthd(priv, 0x9097, 0x1970, 0x00000001);
995 nv_mthd(priv, 0x9097, 0x161c, 0x000009f0);
996 nv_mthd(priv, 0x9097, 0x0dcc, 0x00000010);
997 nv_mthd(priv, 0x9097, 0x163c, 0x00000000);
998 nv_mthd(priv, 0x9097, 0x15e4, 0x00000000);
999 nv_mthd(priv, 0x9097, 0x1160, 0x25e00040);
1000 nv_mthd(priv, 0x9097, 0x1164, 0x25e00040);
1001 nv_mthd(priv, 0x9097, 0x1168, 0x25e00040);
1002 nv_mthd(priv, 0x9097, 0x116c, 0x25e00040);
1003 nv_mthd(priv, 0x9097, 0x1170, 0x25e00040);
1004 nv_mthd(priv, 0x9097, 0x1174, 0x25e00040);
1005 nv_mthd(priv, 0x9097, 0x1178, 0x25e00040);
1006 nv_mthd(priv, 0x9097, 0x117c, 0x25e00040);
1007 nv_mthd(priv, 0x9097, 0x1180, 0x25e00040);
1008 nv_mthd(priv, 0x9097, 0x1184, 0x25e00040);
1009 nv_mthd(priv, 0x9097, 0x1188, 0x25e00040);
1010 nv_mthd(priv, 0x9097, 0x118c, 0x25e00040);
1011 nv_mthd(priv, 0x9097, 0x1190, 0x25e00040);
1012 nv_mthd(priv, 0x9097, 0x1194, 0x25e00040);
1013 nv_mthd(priv, 0x9097, 0x1198, 0x25e00040);
1014 nv_mthd(priv, 0x9097, 0x119c, 0x25e00040);
1015 nv_mthd(priv, 0x9097, 0x11a0, 0x25e00040);
1016 nv_mthd(priv, 0x9097, 0x11a4, 0x25e00040);
1017 nv_mthd(priv, 0x9097, 0x11a8, 0x25e00040);
1018 nv_mthd(priv, 0x9097, 0x11ac, 0x25e00040);
1019 nv_mthd(priv, 0x9097, 0x11b0, 0x25e00040);
1020 nv_mthd(priv, 0x9097, 0x11b4, 0x25e00040);
1021 nv_mthd(priv, 0x9097, 0x11b8, 0x25e00040);
1022 nv_mthd(priv, 0x9097, 0x11bc, 0x25e00040);
1023 nv_mthd(priv, 0x9097, 0x11c0, 0x25e00040);
1024 nv_mthd(priv, 0x9097, 0x11c4, 0x25e00040);
1025 nv_mthd(priv, 0x9097, 0x11c8, 0x25e00040);
1026 nv_mthd(priv, 0x9097, 0x11cc, 0x25e00040);
1027 nv_mthd(priv, 0x9097, 0x11d0, 0x25e00040);
1028 nv_mthd(priv, 0x9097, 0x11d4, 0x25e00040);
1029 nv_mthd(priv, 0x9097, 0x11d8, 0x25e00040);
1030 nv_mthd(priv, 0x9097, 0x11dc, 0x25e00040);
1031 nv_mthd(priv, 0x9097, 0x1880, 0x00000000);
1032 nv_mthd(priv, 0x9097, 0x1884, 0x00000000);
1033 nv_mthd(priv, 0x9097, 0x1888, 0x00000000);
1034 nv_mthd(priv, 0x9097, 0x188c, 0x00000000);
1035 nv_mthd(priv, 0x9097, 0x1890, 0x00000000);
1036 nv_mthd(priv, 0x9097, 0x1894, 0x00000000);
1037 nv_mthd(priv, 0x9097, 0x1898, 0x00000000);
1038 nv_mthd(priv, 0x9097, 0x189c, 0x00000000);
1039 nv_mthd(priv, 0x9097, 0x18a0, 0x00000000);
1040 nv_mthd(priv, 0x9097, 0x18a4, 0x00000000);
1041 nv_mthd(priv, 0x9097, 0x18a8, 0x00000000);
1042 nv_mthd(priv, 0x9097, 0x18ac, 0x00000000);
1043 nv_mthd(priv, 0x9097, 0x18b0, 0x00000000);
1044 nv_mthd(priv, 0x9097, 0x18b4, 0x00000000);
1045 nv_mthd(priv, 0x9097, 0x18b8, 0x00000000);
1046 nv_mthd(priv, 0x9097, 0x18bc, 0x00000000);
1047 nv_mthd(priv, 0x9097, 0x18c0, 0x00000000);
1048 nv_mthd(priv, 0x9097, 0x18c4, 0x00000000);
1049 nv_mthd(priv, 0x9097, 0x18c8, 0x00000000);
1050 nv_mthd(priv, 0x9097, 0x18cc, 0x00000000);
1051 nv_mthd(priv, 0x9097, 0x18d0, 0x00000000);
1052 nv_mthd(priv, 0x9097, 0x18d4, 0x00000000);
1053 nv_mthd(priv, 0x9097, 0x18d8, 0x00000000);
1054 nv_mthd(priv, 0x9097, 0x18dc, 0x00000000);
1055 nv_mthd(priv, 0x9097, 0x18e0, 0x00000000);
1056 nv_mthd(priv, 0x9097, 0x18e4, 0x00000000);
1057 nv_mthd(priv, 0x9097, 0x18e8, 0x00000000);
1058 nv_mthd(priv, 0x9097, 0x18ec, 0x00000000);
1059 nv_mthd(priv, 0x9097, 0x18f0, 0x00000000);
1060 nv_mthd(priv, 0x9097, 0x18f4, 0x00000000);
1061 nv_mthd(priv, 0x9097, 0x18f8, 0x00000000);
1062 nv_mthd(priv, 0x9097, 0x18fc, 0x00000000);
1063 nv_mthd(priv, 0x9097, 0x0f84, 0x00000000);
1064 nv_mthd(priv, 0x9097, 0x0f88, 0x00000000);
1065 nv_mthd(priv, 0x9097, 0x17c8, 0x00000000);
1066 nv_mthd(priv, 0x9097, 0x17cc, 0x00000000);
1067 nv_mthd(priv, 0x9097, 0x17d0, 0x000000ff);
1068 nv_mthd(priv, 0x9097, 0x17d4, 0xffffffff);
1069 nv_mthd(priv, 0x9097, 0x17d8, 0x00000002);
1070 nv_mthd(priv, 0x9097, 0x17dc, 0x00000000);
1071 nv_mthd(priv, 0x9097, 0x15f4, 0x00000000);
1072 nv_mthd(priv, 0x9097, 0x15f8, 0x00000000);
1073 nv_mthd(priv, 0x9097, 0x1434, 0x00000000);
1074 nv_mthd(priv, 0x9097, 0x1438, 0x00000000);
1075 nv_mthd(priv, 0x9097, 0x0d74, 0x00000000);
1076 nv_mthd(priv, 0x9097, 0x0dec, 0x00000001);
1077 nv_mthd(priv, 0x9097, 0x13a4, 0x00000000);
1078 nv_mthd(priv, 0x9097, 0x1318, 0x00000001);
1079 nv_mthd(priv, 0x9097, 0x1644, 0x00000000);
1080 nv_mthd(priv, 0x9097, 0x0748, 0x00000000);
1081 nv_mthd(priv, 0x9097, 0x0de8, 0x00000000);
1082 nv_mthd(priv, 0x9097, 0x1648, 0x00000000);
1083 nv_mthd(priv, 0x9097, 0x12a4, 0x00000000);
1084 nv_mthd(priv, 0x9097, 0x1120, 0x00000000);
1085 nv_mthd(priv, 0x9097, 0x1124, 0x00000000);
1086 nv_mthd(priv, 0x9097, 0x1128, 0x00000000);
1087 nv_mthd(priv, 0x9097, 0x112c, 0x00000000);
1088 nv_mthd(priv, 0x9097, 0x1118, 0x00000000);
1089 nv_mthd(priv, 0x9097, 0x164c, 0x00000000);
1090 nv_mthd(priv, 0x9097, 0x1658, 0x00000000);
1091 nv_mthd(priv, 0x9097, 0x1910, 0x00000290);
1092 nv_mthd(priv, 0x9097, 0x1518, 0x00000000);
1093 nv_mthd(priv, 0x9097, 0x165c, 0x00000001);
1094 nv_mthd(priv, 0x9097, 0x1520, 0x00000000);
1095 nv_mthd(priv, 0x9097, 0x1604, 0x00000000);
1096 nv_mthd(priv, 0x9097, 0x1570, 0x00000000);
1097 nv_mthd(priv, 0x9097, 0x13b0, 0x3f800000);
1098 nv_mthd(priv, 0x9097, 0x13b4, 0x3f800000);
1099 nv_mthd(priv, 0x9097, 0x020c, 0x00000000);
1100 nv_mthd(priv, 0x9097, 0x1670, 0x30201000);
1101 nv_mthd(priv, 0x9097, 0x1674, 0x70605040);
1102 nv_mthd(priv, 0x9097, 0x1678, 0xb8a89888);
1103 nv_mthd(priv, 0x9097, 0x167c, 0xf8e8d8c8);
1104 nv_mthd(priv, 0x9097, 0x166c, 0x00000000);
1105 nv_mthd(priv, 0x9097, 0x1680, 0x00ffff00);
1106 nv_mthd(priv, 0x9097, 0x12d0, 0x00000003);
1107 nv_mthd(priv, 0x9097, 0x12d4, 0x00000002);
1108 nv_mthd(priv, 0x9097, 0x1684, 0x00000000);
1109 nv_mthd(priv, 0x9097, 0x1688, 0x00000000);
1110 nv_mthd(priv, 0x9097, 0x0dac, 0x00001b02);
1111 nv_mthd(priv, 0x9097, 0x0db0, 0x00001b02);
1112 nv_mthd(priv, 0x9097, 0x0db4, 0x00000000);
1113 nv_mthd(priv, 0x9097, 0x168c, 0x00000000);
1114 nv_mthd(priv, 0x9097, 0x15bc, 0x00000000);
1115 nv_mthd(priv, 0x9097, 0x156c, 0x00000000);
1116 nv_mthd(priv, 0x9097, 0x187c, 0x00000000);
1117 nv_mthd(priv, 0x9097, 0x1110, 0x00000001);
1118 nv_mthd(priv, 0x9097, 0x0dc0, 0x00000000);
1119 nv_mthd(priv, 0x9097, 0x0dc4, 0x00000000);
1120 nv_mthd(priv, 0x9097, 0x0dc8, 0x00000000);
1121 nv_mthd(priv, 0x9097, 0x1234, 0x00000000);
1122 nv_mthd(priv, 0x9097, 0x1690, 0x00000000);
1123 nv_mthd(priv, 0x9097, 0x12ac, 0x00000001);
1124 nv_mthd(priv, 0x9097, 0x02c4, 0x00000000);
1125 nv_mthd(priv, 0x9097, 0x0790, 0x00000000);
1126 nv_mthd(priv, 0x9097, 0x0794, 0x00000000);
1127 nv_mthd(priv, 0x9097, 0x0798, 0x00000000);
1128 nv_mthd(priv, 0x9097, 0x079c, 0x00000000);
1129 nv_mthd(priv, 0x9097, 0x07a0, 0x00000000);
1130 nv_mthd(priv, 0x9097, 0x077c, 0x00000000);
1131 nv_mthd(priv, 0x9097, 0x1000, 0x00000010);
1132 nv_mthd(priv, 0x9097, 0x10fc, 0x00000000);
1133 nv_mthd(priv, 0x9097, 0x1290, 0x00000000);
1134 nv_mthd(priv, 0x9097, 0x0218, 0x00000010);
1135 nv_mthd(priv, 0x9097, 0x12d8, 0x00000000);
1136 nv_mthd(priv, 0x9097, 0x12dc, 0x00000010);
1137 nv_mthd(priv, 0x9097, 0x0d94, 0x00000001);
1138 nv_mthd(priv, 0x9097, 0x155c, 0x00000000);
1139 nv_mthd(priv, 0x9097, 0x1560, 0x00000000);
1140 nv_mthd(priv, 0x9097, 0x1564, 0x00001fff);
1141 nv_mthd(priv, 0x9097, 0x1574, 0x00000000);
1142 nv_mthd(priv, 0x9097, 0x1578, 0x00000000);
1143 nv_mthd(priv, 0x9097, 0x157c, 0x003fffff);
1144 nv_mthd(priv, 0x9097, 0x1354, 0x00000000);
1145 nv_mthd(priv, 0x9097, 0x1664, 0x00000000);
1146 nv_mthd(priv, 0x9097, 0x1610, 0x00000012);
1147 nv_mthd(priv, 0x9097, 0x1608, 0x00000000);
1148 nv_mthd(priv, 0x9097, 0x160c, 0x00000000);
1149 nv_mthd(priv, 0x9097, 0x162c, 0x00000003);
1150 nv_mthd(priv, 0x9097, 0x0210, 0x00000000);
1151 nv_mthd(priv, 0x9097, 0x0320, 0x00000000);
1152 nv_mthd(priv, 0x9097, 0x0324, 0x3f800000);
1153 nv_mthd(priv, 0x9097, 0x0328, 0x3f800000);
1154 nv_mthd(priv, 0x9097, 0x032c, 0x3f800000);
1155 nv_mthd(priv, 0x9097, 0x0330, 0x3f800000);
1156 nv_mthd(priv, 0x9097, 0x0334, 0x3f800000);
1157 nv_mthd(priv, 0x9097, 0x0338, 0x3f800000);
1158 nv_mthd(priv, 0x9097, 0x0750, 0x00000000);
1159 nv_mthd(priv, 0x9097, 0x0760, 0x39291909);
1160 nv_mthd(priv, 0x9097, 0x0764, 0x79695949);
1161 nv_mthd(priv, 0x9097, 0x0768, 0xb9a99989);
1162 nv_mthd(priv, 0x9097, 0x076c, 0xf9e9d9c9);
1163 nv_mthd(priv, 0x9097, 0x0770, 0x30201000);
1164 nv_mthd(priv, 0x9097, 0x0774, 0x70605040);
1165 nv_mthd(priv, 0x9097, 0x0778, 0x00009080);
1166 nv_mthd(priv, 0x9097, 0x0780, 0x39291909);
1167 nv_mthd(priv, 0x9097, 0x0784, 0x79695949);
1168 nv_mthd(priv, 0x9097, 0x0788, 0xb9a99989);
1169 nv_mthd(priv, 0x9097, 0x078c, 0xf9e9d9c9);
1170 nv_mthd(priv, 0x9097, 0x07d0, 0x30201000);
1171 nv_mthd(priv, 0x9097, 0x07d4, 0x70605040);
1172 nv_mthd(priv, 0x9097, 0x07d8, 0x00009080);
1173 nv_mthd(priv, 0x9097, 0x037c, 0x00000001);
1174 nv_mthd(priv, 0x9097, 0x0740, 0x00000000);
1175 nv_mthd(priv, 0x9097, 0x0744, 0x00000000);
1176 nv_mthd(priv, 0x9097, 0x2600, 0x00000000);
1177 nv_mthd(priv, 0x9097, 0x1918, 0x00000000);
1178 nv_mthd(priv, 0x9097, 0x191c, 0x00000900);
1179 nv_mthd(priv, 0x9097, 0x1920, 0x00000405);
1180 nv_mthd(priv, 0x9097, 0x1308, 0x00000001);
1181 nv_mthd(priv, 0x9097, 0x1924, 0x00000000);
1182 nv_mthd(priv, 0x9097, 0x13ac, 0x00000000);
1183 nv_mthd(priv, 0x9097, 0x192c, 0x00000001);
1184 nv_mthd(priv, 0x9097, 0x193c, 0x00002c1c);
1185 nv_mthd(priv, 0x9097, 0x0d7c, 0x00000000);
1186 nv_mthd(priv, 0x9097, 0x0f8c, 0x00000000);
1187 nv_mthd(priv, 0x9097, 0x02c0, 0x00000001);
1188 nv_mthd(priv, 0x9097, 0x1510, 0x00000000);
1189 nv_mthd(priv, 0x9097, 0x1940, 0x00000000);
1190 nv_mthd(priv, 0x9097, 0x0ff4, 0x00000000);
1191 nv_mthd(priv, 0x9097, 0x0ff8, 0x00000000);
1192 nv_mthd(priv, 0x9097, 0x194c, 0x00000000);
1193 nv_mthd(priv, 0x9097, 0x1950, 0x00000000);
1194 nv_mthd(priv, 0x9097, 0x1968, 0x00000000);
1195 nv_mthd(priv, 0x9097, 0x1590, 0x0000003f);
1196 nv_mthd(priv, 0x9097, 0x07e8, 0x00000000);
1197 nv_mthd(priv, 0x9097, 0x07ec, 0x00000000);
1198 nv_mthd(priv, 0x9097, 0x07f0, 0x00000000);
1199 nv_mthd(priv, 0x9097, 0x07f4, 0x00000000);
1200 nv_mthd(priv, 0x9097, 0x196c, 0x00000011);
1201 nv_mthd(priv, 0x9097, 0x197c, 0x00000000);
1202 nv_mthd(priv, 0x9097, 0x0fcc, 0x00000000);
1203 nv_mthd(priv, 0x9097, 0x0fd0, 0x00000000);
1204 nv_mthd(priv, 0x9097, 0x02d8, 0x00000040);
1205 nv_mthd(priv, 0x9097, 0x1980, 0x00000080);
1206 nv_mthd(priv, 0x9097, 0x1504, 0x00000080);
1207 nv_mthd(priv, 0x9097, 0x1984, 0x00000000);
1208 nv_mthd(priv, 0x9097, 0x0300, 0x00000001);
1209 nv_mthd(priv, 0x9097, 0x13a8, 0x00000000);
1210 nv_mthd(priv, 0x9097, 0x12ec, 0x00000000);
1211 nv_mthd(priv, 0x9097, 0x1310, 0x00000000);
1212 nv_mthd(priv, 0x9097, 0x1314, 0x00000001);
1213 nv_mthd(priv, 0x9097, 0x1380, 0x00000000);
1214 nv_mthd(priv, 0x9097, 0x1384, 0x00000001);
1215 nv_mthd(priv, 0x9097, 0x1388, 0x00000001);
1216 nv_mthd(priv, 0x9097, 0x138c, 0x00000001);
1217 nv_mthd(priv, 0x9097, 0x1390, 0x00000001);
1218 nv_mthd(priv, 0x9097, 0x1394, 0x00000000);
1219 nv_mthd(priv, 0x9097, 0x139c, 0x00000000);
1220 nv_mthd(priv, 0x9097, 0x1398, 0x00000000);
1221 nv_mthd(priv, 0x9097, 0x1594, 0x00000000);
1222 nv_mthd(priv, 0x9097, 0x1598, 0x00000001);
1223 nv_mthd(priv, 0x9097, 0x159c, 0x00000001);
1224 nv_mthd(priv, 0x9097, 0x15a0, 0x00000001);
1225 nv_mthd(priv, 0x9097, 0x15a4, 0x00000001);
1226 nv_mthd(priv, 0x9097, 0x0f54, 0x00000000);
1227 nv_mthd(priv, 0x9097, 0x0f58, 0x00000000);
1228 nv_mthd(priv, 0x9097, 0x0f5c, 0x00000000);
1229 nv_mthd(priv, 0x9097, 0x19bc, 0x00000000);
1230 nv_mthd(priv, 0x9097, 0x0f9c, 0x00000000);
1231 nv_mthd(priv, 0x9097, 0x0fa0, 0x00000000);
1232 nv_mthd(priv, 0x9097, 0x12cc, 0x00000000);
1233 nv_mthd(priv, 0x9097, 0x12e8, 0x00000000);
1234 nv_mthd(priv, 0x9097, 0x130c, 0x00000001);
1235 nv_mthd(priv, 0x9097, 0x1360, 0x00000000);
1236 nv_mthd(priv, 0x9097, 0x1364, 0x00000000);
1237 nv_mthd(priv, 0x9097, 0x1368, 0x00000000);
1238 nv_mthd(priv, 0x9097, 0x136c, 0x00000000);
1239 nv_mthd(priv, 0x9097, 0x1370, 0x00000000);
1240 nv_mthd(priv, 0x9097, 0x1374, 0x00000000);
1241 nv_mthd(priv, 0x9097, 0x1378, 0x00000000);
1242 nv_mthd(priv, 0x9097, 0x137c, 0x00000000);
1243 nv_mthd(priv, 0x9097, 0x133c, 0x00000001);
1244 nv_mthd(priv, 0x9097, 0x1340, 0x00000001);
1245 nv_mthd(priv, 0x9097, 0x1344, 0x00000002);
1246 nv_mthd(priv, 0x9097, 0x1348, 0x00000001);
1247 nv_mthd(priv, 0x9097, 0x134c, 0x00000001);
1248 nv_mthd(priv, 0x9097, 0x1350, 0x00000002);
1249 nv_mthd(priv, 0x9097, 0x1358, 0x00000001);
1250 nv_mthd(priv, 0x9097, 0x12e4, 0x00000000);
1251 nv_mthd(priv, 0x9097, 0x131c, 0x00000000);
1252 nv_mthd(priv, 0x9097, 0x1320, 0x00000000);
1253 nv_mthd(priv, 0x9097, 0x1324, 0x00000000);
1254 nv_mthd(priv, 0x9097, 0x1328, 0x00000000);
1255 nv_mthd(priv, 0x9097, 0x19c0, 0x00000000);
1256 nv_mthd(priv, 0x9097, 0x1140, 0x00000000);
1257 nv_mthd(priv, 0x9097, 0x19c4, 0x00000000);
1258 nv_mthd(priv, 0x9097, 0x19c8, 0x00001500);
1259 nv_mthd(priv, 0x9097, 0x135c, 0x00000000);
1260 nv_mthd(priv, 0x9097, 0x0f90, 0x00000000);
1261 nv_mthd(priv, 0x9097, 0x19e0, 0x00000001);
1262 nv_mthd(priv, 0x9097, 0x19e4, 0x00000001);
1263 nv_mthd(priv, 0x9097, 0x19e8, 0x00000001);
1264 nv_mthd(priv, 0x9097, 0x19ec, 0x00000001);
1265 nv_mthd(priv, 0x9097, 0x19f0, 0x00000001);
1266 nv_mthd(priv, 0x9097, 0x19f4, 0x00000001);
1267 nv_mthd(priv, 0x9097, 0x19f8, 0x00000001);
1268 nv_mthd(priv, 0x9097, 0x19fc, 0x00000001);
1269 nv_mthd(priv, 0x9097, 0x19cc, 0x00000001);
1270 nv_mthd(priv, 0x9097, 0x15b8, 0x00000000);
1271 nv_mthd(priv, 0x9097, 0x1a00, 0x00001111);
1272 nv_mthd(priv, 0x9097, 0x1a04, 0x00000000);
1273 nv_mthd(priv, 0x9097, 0x1a08, 0x00000000);
1274 nv_mthd(priv, 0x9097, 0x1a0c, 0x00000000);
1275 nv_mthd(priv, 0x9097, 0x1a10, 0x00000000);
1276 nv_mthd(priv, 0x9097, 0x1a14, 0x00000000);
1277 nv_mthd(priv, 0x9097, 0x1a18, 0x00000000);
1278 nv_mthd(priv, 0x9097, 0x1a1c, 0x00000000);
1279 nv_mthd(priv, 0x9097, 0x0d6c, 0xffff0000);
1280 nv_mthd(priv, 0x9097, 0x0d70, 0xffff0000);
1281 nv_mthd(priv, 0x9097, 0x10f8, 0x00001010);
1282 nv_mthd(priv, 0x9097, 0x0d80, 0x00000000);
1283 nv_mthd(priv, 0x9097, 0x0d84, 0x00000000);
1284 nv_mthd(priv, 0x9097, 0x0d88, 0x00000000);
1285 nv_mthd(priv, 0x9097, 0x0d8c, 0x00000000);
1286 nv_mthd(priv, 0x9097, 0x0d90, 0x00000000);
1287 nv_mthd(priv, 0x9097, 0x0da0, 0x00000000);
1288 nv_mthd(priv, 0x9097, 0x1508, 0x80000000);
1289 nv_mthd(priv, 0x9097, 0x150c, 0x40000000);
1290 nv_mthd(priv, 0x9097, 0x1668, 0x00000000);
1291 nv_mthd(priv, 0x9097, 0x0318, 0x00000008);
1292 nv_mthd(priv, 0x9097, 0x031c, 0x00000008);
1293 nv_mthd(priv, 0x9097, 0x0d9c, 0x00000001);
1294 nv_mthd(priv, 0x9097, 0x07dc, 0x00000000);
1295 nv_mthd(priv, 0x9097, 0x074c, 0x00000055);
1296 nv_mthd(priv, 0x9097, 0x1420, 0x00000003);
1297 nv_mthd(priv, 0x9097, 0x17bc, 0x00000000);
1298 nv_mthd(priv, 0x9097, 0x17c0, 0x00000000);
1299 nv_mthd(priv, 0x9097, 0x17c4, 0x00000001);
1300 nv_mthd(priv, 0x9097, 0x1008, 0x00000008);
1301 nv_mthd(priv, 0x9097, 0x100c, 0x00000040);
1302 nv_mthd(priv, 0x9097, 0x1010, 0x0000012c);
1303 nv_mthd(priv, 0x9097, 0x0d60, 0x00000040);
1304 nv_mthd(priv, 0x9097, 0x075c, 0x00000003);
1305 nv_mthd(priv, 0x9097, 0x1018, 0x00000020);
1306 nv_mthd(priv, 0x9097, 0x101c, 0x00000001);
1307 nv_mthd(priv, 0x9097, 0x1020, 0x00000020);
1308 nv_mthd(priv, 0x9097, 0x1024, 0x00000001);
1309 nv_mthd(priv, 0x9097, 0x1444, 0x00000000);
1310 nv_mthd(priv, 0x9097, 0x1448, 0x00000000);
1311 nv_mthd(priv, 0x9097, 0x144c, 0x00000000);
1312 nv_mthd(priv, 0x9097, 0x0360, 0x20164010);
1313 nv_mthd(priv, 0x9097, 0x0364, 0x00000020);
1314 nv_mthd(priv, 0x9097, 0x0368, 0x00000000);
1315 nv_mthd(priv, 0x9097, 0x0de4, 0x00000000);
1316 nv_mthd(priv, 0x9097, 0x0204, 0x00000006);
1317 nv_mthd(priv, 0x9097, 0x0208, 0x00000000);
1318 nv_mthd(priv, 0x9097, 0x02cc, 0x003fffff);
1319 nv_mthd(priv, 0x9097, 0x02d0, 0x00000c48);
1320 nv_mthd(priv, 0x9097, 0x1220, 0x00000005);
1321 nv_mthd(priv, 0x9097, 0x0fdc, 0x00000000);
1322 nv_mthd(priv, 0x9097, 0x0f98, 0x00300008);
1323 nv_mthd(priv, 0x9097, 0x1284, 0x04000080);
1324 nv_mthd(priv, 0x9097, 0x1450, 0x00300008);
1325 nv_mthd(priv, 0x9097, 0x1454, 0x04000080);
1326 nv_mthd(priv, 0x9097, 0x0214, 0x00000000);
1327 /* in trace, right after 0x90c0, not here */
1328 nv_mthd(priv, 0x9097, 0x3410, 0x80002006);
1329}
1330
1331static void
1332nvc0_grctx_generate_9197(struct nvc0_graph_priv *priv)
1333{
1334 u32 fermi = nvc0_graph_class(priv);
1335 u32 mthd;
1336
1337 if (fermi == 0x9197) {
1338 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
1339 nv_mthd(priv, 0x9197, mthd, 0x00000000);
1340 }
1341 nv_mthd(priv, 0x9197, 0x02e4, 0x0000b001);
1342}
1343
1344static void
1345nvc0_grctx_generate_9297(struct nvc0_graph_priv *priv)
1346{
1347 u32 fermi = nvc0_graph_class(priv);
1348 u32 mthd;
1349
1350 if (fermi == 0x9297) {
1351 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
1352 nv_mthd(priv, 0x9297, mthd, 0x00000000);
1353 }
1354 nv_mthd(priv, 0x9297, 0x036c, 0x00000000);
1355 nv_mthd(priv, 0x9297, 0x0370, 0x00000000);
1356 nv_mthd(priv, 0x9297, 0x07a4, 0x00000000);
1357 nv_mthd(priv, 0x9297, 0x07a8, 0x00000000);
1358 nv_mthd(priv, 0x9297, 0x0374, 0x00000000);
1359 nv_mthd(priv, 0x9297, 0x0378, 0x00000020);
1360}
1361
1362static void
1363nvc0_grctx_generate_902d(struct nvc0_graph_priv *priv)
1364{
1365 nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
1366 nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
1367 nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
1368 nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
1369 nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
1370 nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
1371 nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
1372 nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
1373 nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
1374 nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
1375 nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
1376 nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
1377 nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
1378 nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
1379 nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
1380 nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
1381 nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
1382}
1383
1384static void
1385nvc0_grctx_generate_9039(struct nvc0_graph_priv *priv)
1386{
1387 nv_mthd(priv, 0x9039, 0x030c, 0x00000000);
1388 nv_mthd(priv, 0x9039, 0x0310, 0x00000000);
1389 nv_mthd(priv, 0x9039, 0x0314, 0x00000000);
1390 nv_mthd(priv, 0x9039, 0x0320, 0x00000000);
1391 nv_mthd(priv, 0x9039, 0x0238, 0x00000000);
1392 nv_mthd(priv, 0x9039, 0x023c, 0x00000000);
1393 nv_mthd(priv, 0x9039, 0x0318, 0x00000000);
1394 nv_mthd(priv, 0x9039, 0x031c, 0x00000000);
1395}
1396
1397static void
1398nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
1399{
1400 int i;
1401
1402 for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
1403 nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
1404 nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
1405 nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
1406 nv_mthd(priv, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
1407 nv_mthd(priv, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
1408 nv_mthd(priv, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
1409 }
1410 nv_mthd(priv, 0x90c0, 0x270c, 0x00000000);
1411 nv_mthd(priv, 0x90c0, 0x272c, 0x00000000);
1412 nv_mthd(priv, 0x90c0, 0x274c, 0x00000000);
1413 nv_mthd(priv, 0x90c0, 0x276c, 0x00000000);
1414 nv_mthd(priv, 0x90c0, 0x278c, 0x00000000);
1415 nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
1416 nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
1417 nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
1418 for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
1419 nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
1420 nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
1421 nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
1422 nv_mthd(priv, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
1423 }
1424 nv_mthd(priv, 0x90c0, 0x030c, 0x00000001);
1425 nv_mthd(priv, 0x90c0, 0x1944, 0x00000000);
1426 nv_mthd(priv, 0x90c0, 0x0758, 0x00000100);
1427 nv_mthd(priv, 0x90c0, 0x02c4, 0x00000000);
1428 nv_mthd(priv, 0x90c0, 0x0790, 0x00000000);
1429 nv_mthd(priv, 0x90c0, 0x0794, 0x00000000);
1430 nv_mthd(priv, 0x90c0, 0x0798, 0x00000000);
1431 nv_mthd(priv, 0x90c0, 0x079c, 0x00000000);
1432 nv_mthd(priv, 0x90c0, 0x07a0, 0x00000000);
1433 nv_mthd(priv, 0x90c0, 0x077c, 0x00000000);
1434 nv_mthd(priv, 0x90c0, 0x0204, 0x00000000);
1435 nv_mthd(priv, 0x90c0, 0x0208, 0x00000000);
1436 nv_mthd(priv, 0x90c0, 0x020c, 0x00000000);
1437 nv_mthd(priv, 0x90c0, 0x0214, 0x00000000);
1438 nv_mthd(priv, 0x90c0, 0x024c, 0x00000000);
1439 nv_mthd(priv, 0x90c0, 0x0d94, 0x00000001);
1440 nv_mthd(priv, 0x90c0, 0x1608, 0x00000000);
1441 nv_mthd(priv, 0x90c0, 0x160c, 0x00000000);
1442 nv_mthd(priv, 0x90c0, 0x1664, 0x00000000);
1443}
1444
/* Program the initial (golden) context state for the PGRAPH DISPATCH
 * unit (registers 0x4040xx).  Most registers are simply zeroed; the
 * non-zero magic values are presumably captured from mmio traces of
 * the binary driver — their individual meaning is undocumented here.
 */
static void
nvc0_grctx_generate_dispatch(struct nvc0_graph_priv *priv)
{
	int i;

	nv_wr32(priv, 0x404004, 0x00000000);
	nv_wr32(priv, 0x404008, 0x00000000);
	nv_wr32(priv, 0x40400c, 0x00000000);
	nv_wr32(priv, 0x404010, 0x00000000);
	nv_wr32(priv, 0x404014, 0x00000000);
	nv_wr32(priv, 0x404018, 0x00000000);
	nv_wr32(priv, 0x40401c, 0x00000000);
	nv_wr32(priv, 0x404020, 0x00000000);
	nv_wr32(priv, 0x404024, 0x00000000);
	nv_wr32(priv, 0x404028, 0x00000000);
	nv_wr32(priv, 0x40402c, 0x00000000);
	nv_wr32(priv, 0x404044, 0x00000000);
	nv_wr32(priv, 0x404094, 0x00000000);
	nv_wr32(priv, 0x404098, 0x00000000);
	nv_wr32(priv, 0x40409c, 0x00000000);
	nv_wr32(priv, 0x4040a0, 0x00000000);
	nv_wr32(priv, 0x4040a4, 0x00000000);
	nv_wr32(priv, 0x4040a8, 0x00000000);
	nv_wr32(priv, 0x4040ac, 0x00000000);
	nv_wr32(priv, 0x4040b0, 0x00000000);
	nv_wr32(priv, 0x4040b4, 0x00000000);
	nv_wr32(priv, 0x4040b8, 0x00000000);
	nv_wr32(priv, 0x4040bc, 0x00000000);
	nv_wr32(priv, 0x4040c0, 0x00000000);
	nv_wr32(priv, 0x4040c4, 0x00000000);
	nv_wr32(priv, 0x4040c8, 0xf0000087);
	nv_wr32(priv, 0x4040d4, 0x00000000);
	nv_wr32(priv, 0x4040d8, 0x00000000);
	nv_wr32(priv, 0x4040dc, 0x00000000);
	nv_wr32(priv, 0x4040e0, 0x00000000);
	nv_wr32(priv, 0x4040e4, 0x00000000);
	nv_wr32(priv, 0x4040e8, 0x00001000);
	nv_wr32(priv, 0x4040f8, 0x00000000);
	nv_wr32(priv, 0x404130, 0x00000000);
	nv_wr32(priv, 0x404134, 0x00000000);
	nv_wr32(priv, 0x404138, 0x20000040);
	nv_wr32(priv, 0x404150, 0x0000002e);
	nv_wr32(priv, 0x404154, 0x00000400);
	nv_wr32(priv, 0x404158, 0x00000200);
	nv_wr32(priv, 0x404164, 0x00000055);
	nv_wr32(priv, 0x404168, 0x00000000);
	nv_wr32(priv, 0x404174, 0x00000000);
	nv_wr32(priv, 0x404178, 0x00000000);
	nv_wr32(priv, 0x40417c, 0x00000000);
	for (i = 0; i < 8; i++)
		nv_wr32(priv, 0x404200 + (i * 4), 0x00000000); /* subc */
}
1497
/* Initial context state for the MACRO unit (0x4044xx).  The first 14
 * registers are a contiguous zero-filled run, written in the same
 * ascending order as before.
 */
static void
nvc0_grctx_generate_macro(struct nvc0_graph_priv *priv)
{
	int addr;

	for (addr = 0x404404; addr <= 0x404438; addr += 4)
		nv_wr32(priv, addr, 0x00000000);
	nv_wr32(priv, 0x404460, 0x00000000);
	nv_wr32(priv, 0x404464, 0x00000000);
	nv_wr32(priv, 0x404468, 0x00ffffff);
	nv_wr32(priv, 0x40446c, 0x00000000);
	nv_wr32(priv, 0x404480, 0x00000001);
	nv_wr32(priv, 0x404498, 0x00000001);
}
1522
/* Program the initial context state for the M2MF engine registers
 * (0x4046xx).  The non-zero values are magic defaults, presumably
 * taken from mmio traces of the binary driver; note the deliberate
 * gap at 0x4046ec, which is never written.
 */
static void
nvc0_grctx_generate_m2mf(struct nvc0_graph_priv *priv)
{
	nv_wr32(priv, 0x404604, 0x00000015);
	nv_wr32(priv, 0x404608, 0x00000000);
	nv_wr32(priv, 0x40460c, 0x00002e00);
	nv_wr32(priv, 0x404610, 0x00000100);
	nv_wr32(priv, 0x404618, 0x00000000);
	nv_wr32(priv, 0x40461c, 0x00000000);
	nv_wr32(priv, 0x404620, 0x00000000);
	nv_wr32(priv, 0x404624, 0x00000000);
	nv_wr32(priv, 0x404628, 0x00000000);
	nv_wr32(priv, 0x40462c, 0x00000000);
	nv_wr32(priv, 0x404630, 0x00000000);
	nv_wr32(priv, 0x404634, 0x00000000);
	nv_wr32(priv, 0x404638, 0x00000004);
	nv_wr32(priv, 0x40463c, 0x00000000);
	nv_wr32(priv, 0x404640, 0x00000000);
	nv_wr32(priv, 0x404644, 0x00000000);
	nv_wr32(priv, 0x404648, 0x00000000);
	nv_wr32(priv, 0x40464c, 0x00000000);
	nv_wr32(priv, 0x404650, 0x00000000);
	nv_wr32(priv, 0x404654, 0x00000000);
	nv_wr32(priv, 0x404658, 0x00000000);
	nv_wr32(priv, 0x40465c, 0x007f0100);
	nv_wr32(priv, 0x404660, 0x00000000);
	nv_wr32(priv, 0x404664, 0x00000000);
	nv_wr32(priv, 0x404668, 0x00000000);
	nv_wr32(priv, 0x40466c, 0x00000000);
	nv_wr32(priv, 0x404670, 0x00000000);
	nv_wr32(priv, 0x404674, 0x00000000);
	nv_wr32(priv, 0x404678, 0x00000000);
	nv_wr32(priv, 0x40467c, 0x00000002);
	nv_wr32(priv, 0x404680, 0x00000000);
	nv_wr32(priv, 0x404684, 0x00000000);
	nv_wr32(priv, 0x404688, 0x00000000);
	nv_wr32(priv, 0x40468c, 0x00000000);
	nv_wr32(priv, 0x404690, 0x00000000);
	nv_wr32(priv, 0x404694, 0x00000000);
	nv_wr32(priv, 0x404698, 0x00000000);
	nv_wr32(priv, 0x40469c, 0x00000000);
	nv_wr32(priv, 0x4046a0, 0x007f0080);
	nv_wr32(priv, 0x4046a4, 0x00000000);
	nv_wr32(priv, 0x4046a8, 0x00000000);
	nv_wr32(priv, 0x4046ac, 0x00000000);
	nv_wr32(priv, 0x4046b0, 0x00000000);
	nv_wr32(priv, 0x4046b4, 0x00000000);
	nv_wr32(priv, 0x4046b8, 0x00000000);
	nv_wr32(priv, 0x4046bc, 0x00000000);
	nv_wr32(priv, 0x4046c0, 0x00000000);
	nv_wr32(priv, 0x4046c4, 0x00000000);
	nv_wr32(priv, 0x4046c8, 0x00000000);
	nv_wr32(priv, 0x4046cc, 0x00000000);
	nv_wr32(priv, 0x4046d0, 0x00000000);
	nv_wr32(priv, 0x4046d4, 0x00000000);
	nv_wr32(priv, 0x4046d8, 0x00000000);
	nv_wr32(priv, 0x4046dc, 0x00000000);
	nv_wr32(priv, 0x4046e0, 0x00000000);
	nv_wr32(priv, 0x4046e4, 0x00000000);
	nv_wr32(priv, 0x4046e8, 0x00000000);
	nv_wr32(priv, 0x4046f0, 0x00000000);
	nv_wr32(priv, 0x4046f4, 0x00000000);
}
1586
/* Initial context state for the (unidentified) 0x4047xx unit: the
 * whole 0x404700..0x404754 range is zeroed, except 0x404734 which is
 * set to 0x100.  Writes happen in ascending address order, exactly as
 * the original unrolled sequence did.
 */
static void
nvc0_grctx_generate_unk47xx(struct nvc0_graph_priv *priv)
{
	int addr;

	for (addr = 0x404700; addr <= 0x404754; addr += 4)
		nv_wr32(priv, addr, addr == 0x404734 ? 0x00000100 : 0x00000000);
}
1613
1614static void
1615nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
1616{
1617
1618 if (nv_device(priv)->chipset == 0xd9) {
1619 nv_wr32(priv, 0x405800, 0x0f8000bf);
1620 nv_wr32(priv, 0x405830, 0x02180218);
1621 nv_wr32(priv, 0x405834, 0x08000000);
1622 } else
1623 if (nv_device(priv)->chipset == 0xc1) {
1624 nv_wr32(priv, 0x405800, 0x0f8000bf);
1625 nv_wr32(priv, 0x405830, 0x02180218);
1626 nv_wr32(priv, 0x405834, 0x00000000);
1627 } else {
1628 nv_wr32(priv, 0x405800, 0x078000bf);
1629 nv_wr32(priv, 0x405830, 0x02180000);
1630 nv_wr32(priv, 0x405834, 0x00000000);
1631 }
1632 nv_wr32(priv, 0x405838, 0x00000000);
1633 nv_wr32(priv, 0x405854, 0x00000000);
1634 nv_wr32(priv, 0x405870, 0x00000001);
1635 nv_wr32(priv, 0x405874, 0x00000001);
1636 nv_wr32(priv, 0x405878, 0x00000001);
1637 nv_wr32(priv, 0x40587c, 0x00000001);
1638 nv_wr32(priv, 0x405a00, 0x00000000);
1639 nv_wr32(priv, 0x405a04, 0x00000000);
1640 nv_wr32(priv, 0x405a18, 0x00000000);
1641}
1642
/* Initial context state for the 0x4060xx unit: one magic value at
 * 0x406020, then 0x406028..0x406034 all set to 1 (same write order
 * as the original unrolled sequence).
 */
static void
nvc0_grctx_generate_unk60xx(struct nvc0_graph_priv *priv)
{
	int addr;

	nv_wr32(priv, 0x406020, 0x000103c1);
	for (addr = 0x406028; addr <= 0x406034; addr += 4)
		nv_wr32(priv, addr, 0x00000001);
}
1652
1653static void
1654nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
1655{
1656
1657 nv_wr32(priv, 0x4064a8, 0x00000000);
1658 nv_wr32(priv, 0x4064ac, 0x00003fff);
1659 nv_wr32(priv, 0x4064b4, 0x00000000);
1660 nv_wr32(priv, 0x4064b8, 0x00000000);
1661 if (nv_device(priv)->chipset == 0xd9)
1662 nv_wr32(priv, 0x4064bc, 0x00000000);
1663 if (nv_device(priv)->chipset == 0xc1 ||
1664 nv_device(priv)->chipset == 0xd9) {
1665 nv_wr32(priv, 0x4064c0, 0x80140078);
1666 nv_wr32(priv, 0x4064c4, 0x0086ffff);
1667 }
1668}
1669
/* Initial context state for the TPBUS registers (0x4078xx), expressed
 * as an address/data table; the writes happen in the same order as the
 * original unrolled sequence.
 */
static void
nvc0_grctx_generate_tpbus(struct nvc0_graph_priv *priv)
{
	static const struct { unsigned int addr, data; } init[] = {
		{ 0x407804, 0x00000023 },
		{ 0x40780c, 0x0a418820 },
		{ 0x407810, 0x062080e6 },
		{ 0x407814, 0x020398a4 },
		{ 0x407818, 0x0e629062 },
		{ 0x40781c, 0x0a418820 },
		{ 0x407820, 0x000000e6 },
		{ 0x4078bc, 0x00000103 },
	};
	int i;

	for (i = 0; i < (int)(sizeof(init) / sizeof(init[0])); i++)
		nv_wr32(priv, init[i].addr, init[i].data);
}
1682
/* Initial context state for the CCACHE registers (0x4080xx), expressed
 * as an address/data table; write order matches the original unrolled
 * sequence.
 */
static void
nvc0_grctx_generate_ccache(struct nvc0_graph_priv *priv)
{
	static const struct { unsigned int addr, data; } init[] = {
		{ 0x408000, 0x00000000 },
		{ 0x408004, 0x00000000 },
		{ 0x408008, 0x00000018 },
		{ 0x40800c, 0x00000000 },
		{ 0x408010, 0x00000000 },
		{ 0x408014, 0x00000069 },
		{ 0x408018, 0xe100e100 },
		{ 0x408064, 0x00000000 },
	};
	int i;

	for (i = 0; i < (int)(sizeof(init) / sizeof(init[0])); i++)
		nv_wr32(priv, init[i].addr, init[i].data);
}
1695
1696static void
1697nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
1698{
1699 int chipset = nv_device(priv)->chipset;
1700
1701 /* ROPC_BROADCAST */
1702 nv_wr32(priv, 0x408800, 0x02802a3c);
1703 nv_wr32(priv, 0x408804, 0x00000040);
1704 if (chipset == 0xd9) {
1705 nv_wr32(priv, 0x408808, 0x1043e005);
1706 nv_wr32(priv, 0x408900, 0x3080b801);
1707 nv_wr32(priv, 0x408904, 0x1043e005);
1708 nv_wr32(priv, 0x408908, 0x00c8102f);
1709 } else
1710 if (chipset == 0xc1) {
1711 nv_wr32(priv, 0x408808, 0x1003e005);
1712 nv_wr32(priv, 0x408900, 0x3080b801);
1713 nv_wr32(priv, 0x408904, 0x62000001);
1714 nv_wr32(priv, 0x408908, 0x00c80929);
1715 } else {
1716 nv_wr32(priv, 0x408808, 0x0003e00d);
1717 nv_wr32(priv, 0x408900, 0x3080b801);
1718 nv_wr32(priv, 0x408904, 0x02000001);
1719 nv_wr32(priv, 0x408908, 0x00c80929);
1720 }
1721 nv_wr32(priv, 0x40890c, 0x00000000);
1722 nv_wr32(priv, 0x408980, 0x0000011d);
1723}
1724
/* Program the initial context state for the GPC broadcast registers
 * (0x418xxx/0x419xxx).  Several registers take chipset-specific magic
 * values (NVD9, NVC1, everything else); the values are presumably
 * captured from mmio traces of the binary driver and their individual
 * meaning is undocumented here.
 */
static void
nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
{
	int chipset = nv_device(priv)->chipset;
	int i;

	/* GPC_BROADCAST */
	nv_wr32(priv, 0x418380, 0x00000016);
	nv_wr32(priv, 0x418400, 0x38004e00);
	nv_wr32(priv, 0x418404, 0x71e0ffff);
	nv_wr32(priv, 0x418408, 0x00000000);
	nv_wr32(priv, 0x41840c, 0x00001008);
	nv_wr32(priv, 0x418410, 0x0fff0fff);
	nv_wr32(priv, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
	nv_wr32(priv, 0x418450, 0x00000000);
	nv_wr32(priv, 0x418454, 0x00000000);
	nv_wr32(priv, 0x418458, 0x00000000);
	nv_wr32(priv, 0x41845c, 0x00000000);
	nv_wr32(priv, 0x418460, 0x00000000);
	nv_wr32(priv, 0x418464, 0x00000000);
	nv_wr32(priv, 0x418468, 0x00000001);
	nv_wr32(priv, 0x41846c, 0x00000000);
	nv_wr32(priv, 0x418470, 0x00000000);
	nv_wr32(priv, 0x418600, 0x0000001f);
	nv_wr32(priv, 0x418684, 0x0000000f);
	nv_wr32(priv, 0x418700, 0x00000002);
	nv_wr32(priv, 0x418704, 0x00000080);
	nv_wr32(priv, 0x418708, 0x00000000);
	nv_wr32(priv, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
	nv_wr32(priv, 0x418710, 0x00000000);
	nv_wr32(priv, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
	nv_wr32(priv, 0x418808, 0x00000000);
	nv_wr32(priv, 0x41880c, 0x00000000);
	nv_wr32(priv, 0x418810, 0x00000000);
	nv_wr32(priv, 0x418828, 0x00008442);
	if (chipset == 0xc1 || chipset == 0xd9)
		nv_wr32(priv, 0x418830, 0x10000001);
	else
		nv_wr32(priv, 0x418830, 0x00000001);
	nv_wr32(priv, 0x4188d8, 0x00000008);
	nv_wr32(priv, 0x4188e0, 0x01000000);
	nv_wr32(priv, 0x4188e8, 0x00000000);
	nv_wr32(priv, 0x4188ec, 0x00000000);
	nv_wr32(priv, 0x4188f0, 0x00000000);
	nv_wr32(priv, 0x4188f4, 0x00000000);
	nv_wr32(priv, 0x4188f8, 0x00000000);
	if (chipset == 0xd9)
		nv_wr32(priv, 0x4188fc, 0x20100008);
	else if (chipset == 0xc1)
		nv_wr32(priv, 0x4188fc, 0x00100018);
	else
		nv_wr32(priv, 0x4188fc, 0x00100000);
	nv_wr32(priv, 0x41891c, 0x00ff00ff);
	nv_wr32(priv, 0x418924, 0x00000000);
	nv_wr32(priv, 0x418928, 0x00ffff00);
	nv_wr32(priv, 0x41892c, 0x0000ff00);
	/* eight register groups, 0x20 bytes apart */
	for (i = 0; i < 8; i++) {
		nv_wr32(priv, 0x418a00 + (i * 0x20), 0x00000000);
		nv_wr32(priv, 0x418a04 + (i * 0x20), 0x00000000);
		nv_wr32(priv, 0x418a08 + (i * 0x20), 0x00000000);
		nv_wr32(priv, 0x418a0c + (i * 0x20), 0x00010000);
		nv_wr32(priv, 0x418a10 + (i * 0x20), 0x00000000);
		nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000);
		nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000);
	}
	nv_wr32(priv, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
	nv_wr32(priv, 0x418b08, 0x0a418820);
	nv_wr32(priv, 0x418b0c, 0x062080e6);
	nv_wr32(priv, 0x418b10, 0x020398a4);
	nv_wr32(priv, 0x418b14, 0x0e629062);
	nv_wr32(priv, 0x418b18, 0x0a418820);
	nv_wr32(priv, 0x418b1c, 0x000000e6);
	nv_wr32(priv, 0x418bb8, 0x00000103);
	nv_wr32(priv, 0x418c08, 0x00000001);
	nv_wr32(priv, 0x418c10, 0x00000000);
	nv_wr32(priv, 0x418c14, 0x00000000);
	nv_wr32(priv, 0x418c18, 0x00000000);
	nv_wr32(priv, 0x418c1c, 0x00000000);
	nv_wr32(priv, 0x418c20, 0x00000000);
	nv_wr32(priv, 0x418c24, 0x00000000);
	nv_wr32(priv, 0x418c28, 0x00000000);
	nv_wr32(priv, 0x418c2c, 0x00000000);
	if (chipset == 0xc1 || chipset == 0xd9)
		nv_wr32(priv, 0x418c6c, 0x00000001);
	nv_wr32(priv, 0x418c80, 0x20200004);
	nv_wr32(priv, 0x418c8c, 0x00000001);
	nv_wr32(priv, 0x419000, 0x00000780);
	nv_wr32(priv, 0x419004, 0x00000000);
	nv_wr32(priv, 0x419008, 0x00000000);
	nv_wr32(priv, 0x419014, 0x00000004);
}
1816
1817static void
1818nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1819{
1820 int chipset = nv_device(priv)->chipset;
1821
1822 /* GPC_BROADCAST.TP_BROADCAST */
1823 nv_wr32(priv, 0x419818, 0x00000000);
1824 nv_wr32(priv, 0x41983c, 0x00038bc7);
1825 nv_wr32(priv, 0x419848, 0x00000000);
1826 if (chipset == 0xc1 || chipset == 0xd9)
1827 nv_wr32(priv, 0x419864, 0x00000129);
1828 else
1829 nv_wr32(priv, 0x419864, 0x0000012a);
1830 nv_wr32(priv, 0x419888, 0x00000000);
1831 nv_wr32(priv, 0x419a00, 0x000001f0);
1832 nv_wr32(priv, 0x419a04, 0x00000001);
1833 nv_wr32(priv, 0x419a08, 0x00000023);
1834 nv_wr32(priv, 0x419a0c, 0x00020000);
1835 nv_wr32(priv, 0x419a10, 0x00000000);
1836 nv_wr32(priv, 0x419a14, 0x00000200);
1837 nv_wr32(priv, 0x419a1c, 0x00000000);
1838 nv_wr32(priv, 0x419a20, 0x00000800);
1839 if (chipset == 0xd9)
1840 nv_wr32(priv, 0x00419ac4, 0x0017f440);
1841 else if (chipset != 0xc0 && chipset != 0xc8)
1842 nv_wr32(priv, 0x00419ac4, 0x0007f440);
1843 nv_wr32(priv, 0x419b00, 0x0a418820);
1844 nv_wr32(priv, 0x419b04, 0x062080e6);
1845 nv_wr32(priv, 0x419b08, 0x020398a4);
1846 nv_wr32(priv, 0x419b0c, 0x0e629062);
1847 nv_wr32(priv, 0x419b10, 0x0a418820);
1848 nv_wr32(priv, 0x419b14, 0x000000e6);
1849 nv_wr32(priv, 0x419bd0, 0x00900103);
1850 if (chipset == 0xc1 || chipset == 0xd9)
1851 nv_wr32(priv, 0x419be0, 0x00400001);
1852 else
1853 nv_wr32(priv, 0x419be0, 0x00000001);
1854 nv_wr32(priv, 0x419be4, 0x00000000);
1855 nv_wr32(priv, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
1856 nv_wr32(priv, 0x419c04, 0x00000006);
1857 nv_wr32(priv, 0x419c08, 0x00000002);
1858 nv_wr32(priv, 0x419c20, 0x00000000);
1859 if (nv_device(priv)->chipset == 0xd9) {
1860 nv_wr32(priv, 0x419c24, 0x00084210);
1861 nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
1862 nv_wr32(priv, 0x419cb0, 0x00020048);
1863 } else
1864 if (chipset == 0xce || chipset == 0xcf) {
1865 nv_wr32(priv, 0x419cb0, 0x00020048);
1866 } else {
1867 nv_wr32(priv, 0x419cb0, 0x00060048);
1868 }
1869 nv_wr32(priv, 0x419ce8, 0x00000000);
1870 nv_wr32(priv, 0x419cf4, 0x00000183);
1871 if (chipset == 0xc1 || chipset == 0xd9)
1872 nv_wr32(priv, 0x419d20, 0x12180000);
1873 else
1874 nv_wr32(priv, 0x419d20, 0x02180000);
1875 nv_wr32(priv, 0x419d24, 0x00001fff);
1876 if (chipset == 0xc1 || chipset == 0xd9)
1877 nv_wr32(priv, 0x419d44, 0x02180218);
1878 nv_wr32(priv, 0x419e04, 0x00000000);
1879 nv_wr32(priv, 0x419e08, 0x00000000);
1880 nv_wr32(priv, 0x419e0c, 0x00000000);
1881 nv_wr32(priv, 0x419e10, 0x00000002);
1882 nv_wr32(priv, 0x419e44, 0x001beff2);
1883 nv_wr32(priv, 0x419e48, 0x00000000);
1884 nv_wr32(priv, 0x419e4c, 0x0000000f);
1885 nv_wr32(priv, 0x419e50, 0x00000000);
1886 nv_wr32(priv, 0x419e54, 0x00000000);
1887 nv_wr32(priv, 0x419e58, 0x00000000);
1888 nv_wr32(priv, 0x419e5c, 0x00000000);
1889 nv_wr32(priv, 0x419e60, 0x00000000);
1890 nv_wr32(priv, 0x419e64, 0x00000000);
1891 nv_wr32(priv, 0x419e68, 0x00000000);
1892 nv_wr32(priv, 0x419e6c, 0x00000000);
1893 nv_wr32(priv, 0x419e70, 0x00000000);
1894 nv_wr32(priv, 0x419e74, 0x00000000);
1895 nv_wr32(priv, 0x419e78, 0x00000000);
1896 nv_wr32(priv, 0x419e7c, 0x00000000);
1897 nv_wr32(priv, 0x419e80, 0x00000000);
1898 nv_wr32(priv, 0x419e84, 0x00000000);
1899 nv_wr32(priv, 0x419e88, 0x00000000);
1900 nv_wr32(priv, 0x419e8c, 0x00000000);
1901 nv_wr32(priv, 0x419e90, 0x00000000);
1902 nv_wr32(priv, 0x419e98, 0x00000000);
1903 if (chipset != 0xc0 && chipset != 0xc8)
1904 nv_wr32(priv, 0x419ee0, 0x00011110);
1905 nv_wr32(priv, 0x419f50, 0x00000000);
1906 nv_wr32(priv, 0x419f54, 0x00000000);
1907 if (chipset != 0xc0 && chipset != 0xc8)
1908 nv_wr32(priv, 0x419f58, 0x00000000);
1909}
1910
1911int
1912nvc0_grctx_generate(struct nvc0_graph_priv *priv)
1913{
1914 struct nvc0_grctx info;
1915 int ret, i, gpc, tpc, id;
1916 u32 fermi = nvc0_graph_class(priv);
1917 u32 r000260, tmp;
1918
1919 ret = nvc0_grctx_init(priv, &info);
1920 if (ret)
1921 return ret;
1922
1923 r000260 = nv_rd32(priv, 0x000260);
1924 nv_wr32(priv, 0x000260, r000260 & ~1);
1925 nv_wr32(priv, 0x400208, 0x00000000);
1926
1927 nvc0_grctx_generate_dispatch(priv);
1928 nvc0_grctx_generate_macro(priv);
1929 nvc0_grctx_generate_m2mf(priv);
1930 nvc0_grctx_generate_unk47xx(priv);
1931 nvc0_grctx_generate_shaders(priv);
1932 nvc0_grctx_generate_unk60xx(priv);
1933 nvc0_grctx_generate_unk64xx(priv);
1934 nvc0_grctx_generate_tpbus(priv);
1935 nvc0_grctx_generate_ccache(priv);
1936 nvc0_grctx_generate_rop(priv);
1937 nvc0_grctx_generate_gpc(priv);
1938 nvc0_grctx_generate_tp(priv);
1939
1940 nv_wr32(priv, 0x404154, 0x00000000);
1941
1942 /* generate per-context mmio list data */
1943 mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
1944 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
1945 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
1946 mmio_list(0x408004, 0x00000000, 8, 0);
1947 mmio_list(0x408008, 0x80000018, 0, 0);
1948 mmio_list(0x40800c, 0x00000000, 8, 1);
1949 mmio_list(0x408010, 0x80000000, 0, 0);
1950 mmio_list(0x418810, 0x80000000, 12, 2);
1951 mmio_list(0x419848, 0x10000000, 12, 2);
1952 mmio_list(0x419004, 0x00000000, 8, 1);
1953 mmio_list(0x419008, 0x00000000, 0, 0);
1954 mmio_list(0x418808, 0x00000000, 8, 0);
1955 mmio_list(0x41880c, 0x80000018, 0, 0);
1956 if (nv_device(priv)->chipset != 0xc1) {
1957 tmp = 0x02180000;
1958 mmio_list(0x405830, tmp, 0, 0);
1959 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1960 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1961 u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
1962 mmio_list(reg, tmp, 0, 0);
1963 tmp += 0x0324;
1964 }
1965 }
1966 } else {
1967 tmp = 0x02180000;
1968 mmio_list(0x405830, 0x00000218 | tmp, 0, 0);
1969 mmio_list(0x4064c4, 0x0086ffff, 0, 0);
1970 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1971 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1972 u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
1973 mmio_list(reg, 0x10000000 | tmp, 0, 0);
1974 tmp += 0x0324;
1975 }
1976 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1977 u32 reg = TPC_UNIT(gpc, tpc, 0x0544);
1978 mmio_list(reg, tmp, 0, 0);
1979 tmp += 0x0324;
1980 }
1981 }
1982 }
1983
1984 for (tpc = 0, id = 0; tpc < 4; tpc++) {
1985 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1986 if (tpc < priv->tpc_nr[gpc]) {
1987 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
1988 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
1989 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
1990 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
1991 id++;
1992 }
1993
1994 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
1995 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
1996 }
1997 }
1998
1999 tmp = 0;
2000 for (i = 0; i < priv->gpc_nr; i++)
2001 tmp |= priv->tpc_nr[i] << (i * 4);
2002 nv_wr32(priv, 0x406028, tmp);
2003 nv_wr32(priv, 0x405870, tmp);
2004
2005 nv_wr32(priv, 0x40602c, 0x00000000);
2006 nv_wr32(priv, 0x405874, 0x00000000);
2007 nv_wr32(priv, 0x406030, 0x00000000);
2008 nv_wr32(priv, 0x405878, 0x00000000);
2009 nv_wr32(priv, 0x406034, 0x00000000);
2010 nv_wr32(priv, 0x40587c, 0x00000000);
2011
2012 if (1) {
2013 u8 tpcnr[GPC_MAX], data[TPC_MAX];
2014
2015 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2016 memset(data, 0x1f, sizeof(data));
2017
2018 gpc = -1;
2019 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2020 do {
2021 gpc = (gpc + 1) % priv->gpc_nr;
2022 } while (!tpcnr[gpc]);
2023 tpcnr[gpc]--;
2024 data[tpc] = gpc;
2025 }
2026
2027 for (i = 0; i < 4; i++)
2028 nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
2029 }
2030
2031 if (1) {
2032 u32 data[6] = {}, data2[2] = {};
2033 u8 tpcnr[GPC_MAX];
2034 u8 shift, ntpcv;
2035
2036 /* calculate first set of magics */
2037 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2038
2039 gpc = -1;
2040 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2041 do {
2042 gpc = (gpc + 1) % priv->gpc_nr;
2043 } while (!tpcnr[gpc]);
2044 tpcnr[gpc]--;
2045
2046 data[tpc / 6] |= gpc << ((tpc % 6) * 5);
2047 }
2048
2049 for (; tpc < 32; tpc++)
2050 data[tpc / 6] |= 7 << ((tpc % 6) * 5);
2051
2052 /* and the second... */
2053 shift = 0;
2054 ntpcv = priv->tpc_total;
2055 while (!(ntpcv & (1 << 4))) {
2056 ntpcv <<= 1;
2057 shift++;
2058 }
2059
2060 data2[0] = (ntpcv << 16);
2061 data2[0] |= (shift << 21);
2062 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
2063 for (i = 1; i < 7; i++)
2064 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
2065
2066 /* GPC_BROADCAST */
2067 nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
2068 priv->magic_not_rop_nr);
2069 for (i = 0; i < 6; i++)
2070 nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
2071
2072 /* GPC_BROADCAST.TP_BROADCAST */
2073 nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
2074 priv->magic_not_rop_nr |
2075 data2[0]);
2076 nv_wr32(priv, 0x419be4, data2[1]);
2077 for (i = 0; i < 6; i++)
2078 nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
2079
2080 /* UNK78xx */
2081 nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
2082 priv->magic_not_rop_nr);
2083 for (i = 0; i < 6; i++)
2084 nv_wr32(priv, 0x40780c + (i * 4), data[i]);
2085 }
2086
2087 if (1) {
2088 u32 tpc_mask = 0, tpc_set = 0;
2089 u8 tpcnr[GPC_MAX], a, b;
2090
2091 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2092 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
2093 tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
2094
2095 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
2096 a = (i * (priv->tpc_total - 1)) / 32;
2097 if (a != b) {
2098 b = a;
2099 do {
2100 gpc = (gpc + 1) % priv->gpc_nr;
2101 } while (!tpcnr[gpc]);
2102 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
2103
2104 tpc_set |= 1 << ((gpc * 8) + tpc);
2105 }
2106
2107 nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
2108 nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
2109 }
2110 }
2111
2112 nv_wr32(priv, 0x400208, 0x80000000);
2113
2114 nv_icmd(priv, 0x00001000, 0x00000004);
2115 nv_icmd(priv, 0x000000a9, 0x0000ffff);
2116 nv_icmd(priv, 0x00000038, 0x0fac6881);
2117 nv_icmd(priv, 0x0000003d, 0x00000001);
2118 nv_icmd(priv, 0x000000e8, 0x00000400);
2119 nv_icmd(priv, 0x000000e9, 0x00000400);
2120 nv_icmd(priv, 0x000000ea, 0x00000400);
2121 nv_icmd(priv, 0x000000eb, 0x00000400);
2122 nv_icmd(priv, 0x000000ec, 0x00000400);
2123 nv_icmd(priv, 0x000000ed, 0x00000400);
2124 nv_icmd(priv, 0x000000ee, 0x00000400);
2125 nv_icmd(priv, 0x000000ef, 0x00000400);
2126 nv_icmd(priv, 0x00000078, 0x00000300);
2127 nv_icmd(priv, 0x00000079, 0x00000300);
2128 nv_icmd(priv, 0x0000007a, 0x00000300);
2129 nv_icmd(priv, 0x0000007b, 0x00000300);
2130 nv_icmd(priv, 0x0000007c, 0x00000300);
2131 nv_icmd(priv, 0x0000007d, 0x00000300);
2132 nv_icmd(priv, 0x0000007e, 0x00000300);
2133 nv_icmd(priv, 0x0000007f, 0x00000300);
2134 nv_icmd(priv, 0x00000050, 0x00000011);
2135 nv_icmd(priv, 0x00000058, 0x00000008);
2136 nv_icmd(priv, 0x00000059, 0x00000008);
2137 nv_icmd(priv, 0x0000005a, 0x00000008);
2138 nv_icmd(priv, 0x0000005b, 0x00000008);
2139 nv_icmd(priv, 0x0000005c, 0x00000008);
2140 nv_icmd(priv, 0x0000005d, 0x00000008);
2141 nv_icmd(priv, 0x0000005e, 0x00000008);
2142 nv_icmd(priv, 0x0000005f, 0x00000008);
2143 nv_icmd(priv, 0x00000208, 0x00000001);
2144 nv_icmd(priv, 0x00000209, 0x00000001);
2145 nv_icmd(priv, 0x0000020a, 0x00000001);
2146 nv_icmd(priv, 0x0000020b, 0x00000001);
2147 nv_icmd(priv, 0x0000020c, 0x00000001);
2148 nv_icmd(priv, 0x0000020d, 0x00000001);
2149 nv_icmd(priv, 0x0000020e, 0x00000001);
2150 nv_icmd(priv, 0x0000020f, 0x00000001);
2151 nv_icmd(priv, 0x00000081, 0x00000001);
2152 nv_icmd(priv, 0x00000085, 0x00000004);
2153 nv_icmd(priv, 0x00000088, 0x00000400);
2154 nv_icmd(priv, 0x00000090, 0x00000300);
2155 nv_icmd(priv, 0x00000098, 0x00001001);
2156 nv_icmd(priv, 0x000000e3, 0x00000001);
2157 nv_icmd(priv, 0x000000da, 0x00000001);
2158 nv_icmd(priv, 0x000000f8, 0x00000003);
2159 nv_icmd(priv, 0x000000fa, 0x00000001);
2160 nv_icmd(priv, 0x0000009f, 0x0000ffff);
2161 nv_icmd(priv, 0x000000a0, 0x0000ffff);
2162 nv_icmd(priv, 0x000000a1, 0x0000ffff);
2163 nv_icmd(priv, 0x000000a2, 0x0000ffff);
2164 nv_icmd(priv, 0x000000b1, 0x00000001);
2165 nv_icmd(priv, 0x000000b2, 0x00000000);
2166 nv_icmd(priv, 0x000000b3, 0x00000000);
2167 nv_icmd(priv, 0x000000b4, 0x00000000);
2168 nv_icmd(priv, 0x000000b5, 0x00000000);
2169 nv_icmd(priv, 0x000000b6, 0x00000000);
2170 nv_icmd(priv, 0x000000b7, 0x00000000);
2171 nv_icmd(priv, 0x000000b8, 0x00000000);
2172 nv_icmd(priv, 0x000000b9, 0x00000000);
2173 nv_icmd(priv, 0x000000ba, 0x00000000);
2174 nv_icmd(priv, 0x000000bb, 0x00000000);
2175 nv_icmd(priv, 0x000000bc, 0x00000000);
2176 nv_icmd(priv, 0x000000bd, 0x00000000);
2177 nv_icmd(priv, 0x000000be, 0x00000000);
2178 nv_icmd(priv, 0x000000bf, 0x00000000);
2179 nv_icmd(priv, 0x000000c0, 0x00000000);
2180 nv_icmd(priv, 0x000000c1, 0x00000000);
2181 nv_icmd(priv, 0x000000c2, 0x00000000);
2182 nv_icmd(priv, 0x000000c3, 0x00000000);
2183 nv_icmd(priv, 0x000000c4, 0x00000000);
2184 nv_icmd(priv, 0x000000c5, 0x00000000);
2185 nv_icmd(priv, 0x000000c6, 0x00000000);
2186 nv_icmd(priv, 0x000000c7, 0x00000000);
2187 nv_icmd(priv, 0x000000c8, 0x00000000);
2188 nv_icmd(priv, 0x000000c9, 0x00000000);
2189 nv_icmd(priv, 0x000000ca, 0x00000000);
2190 nv_icmd(priv, 0x000000cb, 0x00000000);
2191 nv_icmd(priv, 0x000000cc, 0x00000000);
2192 nv_icmd(priv, 0x000000cd, 0x00000000);
2193 nv_icmd(priv, 0x000000ce, 0x00000000);
2194 nv_icmd(priv, 0x000000cf, 0x00000000);
2195 nv_icmd(priv, 0x000000d0, 0x00000000);
2196 nv_icmd(priv, 0x000000d1, 0x00000000);
2197 nv_icmd(priv, 0x000000d2, 0x00000000);
2198 nv_icmd(priv, 0x000000d3, 0x00000000);
2199 nv_icmd(priv, 0x000000d4, 0x00000000);
2200 nv_icmd(priv, 0x000000d5, 0x00000000);
2201 nv_icmd(priv, 0x000000d6, 0x00000000);
2202 nv_icmd(priv, 0x000000d7, 0x00000000);
2203 nv_icmd(priv, 0x000000d8, 0x00000000);
2204 nv_icmd(priv, 0x000000d9, 0x00000000);
2205 nv_icmd(priv, 0x00000210, 0x00000040);
2206 nv_icmd(priv, 0x00000211, 0x00000040);
2207 nv_icmd(priv, 0x00000212, 0x00000040);
2208 nv_icmd(priv, 0x00000213, 0x00000040);
2209 nv_icmd(priv, 0x00000214, 0x00000040);
2210 nv_icmd(priv, 0x00000215, 0x00000040);
2211 nv_icmd(priv, 0x00000216, 0x00000040);
2212 nv_icmd(priv, 0x00000217, 0x00000040);
2213 if (nv_device(priv)->chipset == 0xd9) {
2214 for (i = 0x0400; i <= 0x0417; i++)
2215 nv_icmd(priv, i, 0x00000040);
2216 }
2217 nv_icmd(priv, 0x00000218, 0x0000c080);
2218 nv_icmd(priv, 0x00000219, 0x0000c080);
2219 nv_icmd(priv, 0x0000021a, 0x0000c080);
2220 nv_icmd(priv, 0x0000021b, 0x0000c080);
2221 nv_icmd(priv, 0x0000021c, 0x0000c080);
2222 nv_icmd(priv, 0x0000021d, 0x0000c080);
2223 nv_icmd(priv, 0x0000021e, 0x0000c080);
2224 nv_icmd(priv, 0x0000021f, 0x0000c080);
2225 if (nv_device(priv)->chipset == 0xd9) {
2226 for (i = 0x0440; i <= 0x0457; i++)
2227 nv_icmd(priv, i, 0x0000c080);
2228 }
2229 nv_icmd(priv, 0x000000ad, 0x0000013e);
2230 nv_icmd(priv, 0x000000e1, 0x00000010);
2231 nv_icmd(priv, 0x00000290, 0x00000000);
2232 nv_icmd(priv, 0x00000291, 0x00000000);
2233 nv_icmd(priv, 0x00000292, 0x00000000);
2234 nv_icmd(priv, 0x00000293, 0x00000000);
2235 nv_icmd(priv, 0x00000294, 0x00000000);
2236 nv_icmd(priv, 0x00000295, 0x00000000);
2237 nv_icmd(priv, 0x00000296, 0x00000000);
2238 nv_icmd(priv, 0x00000297, 0x00000000);
2239 nv_icmd(priv, 0x00000298, 0x00000000);
2240 nv_icmd(priv, 0x00000299, 0x00000000);
2241 nv_icmd(priv, 0x0000029a, 0x00000000);
2242 nv_icmd(priv, 0x0000029b, 0x00000000);
2243 nv_icmd(priv, 0x0000029c, 0x00000000);
2244 nv_icmd(priv, 0x0000029d, 0x00000000);
2245 nv_icmd(priv, 0x0000029e, 0x00000000);
2246 nv_icmd(priv, 0x0000029f, 0x00000000);
2247 nv_icmd(priv, 0x000003b0, 0x00000000);
2248 nv_icmd(priv, 0x000003b1, 0x00000000);
2249 nv_icmd(priv, 0x000003b2, 0x00000000);
2250 nv_icmd(priv, 0x000003b3, 0x00000000);
2251 nv_icmd(priv, 0x000003b4, 0x00000000);
2252 nv_icmd(priv, 0x000003b5, 0x00000000);
2253 nv_icmd(priv, 0x000003b6, 0x00000000);
2254 nv_icmd(priv, 0x000003b7, 0x00000000);
2255 nv_icmd(priv, 0x000003b8, 0x00000000);
2256 nv_icmd(priv, 0x000003b9, 0x00000000);
2257 nv_icmd(priv, 0x000003ba, 0x00000000);
2258 nv_icmd(priv, 0x000003bb, 0x00000000);
2259 nv_icmd(priv, 0x000003bc, 0x00000000);
2260 nv_icmd(priv, 0x000003bd, 0x00000000);
2261 nv_icmd(priv, 0x000003be, 0x00000000);
2262 nv_icmd(priv, 0x000003bf, 0x00000000);
2263 nv_icmd(priv, 0x000002a0, 0x00000000);
2264 nv_icmd(priv, 0x000002a1, 0x00000000);
2265 nv_icmd(priv, 0x000002a2, 0x00000000);
2266 nv_icmd(priv, 0x000002a3, 0x00000000);
2267 nv_icmd(priv, 0x000002a4, 0x00000000);
2268 nv_icmd(priv, 0x000002a5, 0x00000000);
2269 nv_icmd(priv, 0x000002a6, 0x00000000);
2270 nv_icmd(priv, 0x000002a7, 0x00000000);
2271 nv_icmd(priv, 0x000002a8, 0x00000000);
2272 nv_icmd(priv, 0x000002a9, 0x00000000);
2273 nv_icmd(priv, 0x000002aa, 0x00000000);
2274 nv_icmd(priv, 0x000002ab, 0x00000000);
2275 nv_icmd(priv, 0x000002ac, 0x00000000);
2276 nv_icmd(priv, 0x000002ad, 0x00000000);
2277 nv_icmd(priv, 0x000002ae, 0x00000000);
2278 nv_icmd(priv, 0x000002af, 0x00000000);
2279 nv_icmd(priv, 0x00000420, 0x00000000);
2280 nv_icmd(priv, 0x00000421, 0x00000000);
2281 nv_icmd(priv, 0x00000422, 0x00000000);
2282 nv_icmd(priv, 0x00000423, 0x00000000);
2283 nv_icmd(priv, 0x00000424, 0x00000000);
2284 nv_icmd(priv, 0x00000425, 0x00000000);
2285 nv_icmd(priv, 0x00000426, 0x00000000);
2286 nv_icmd(priv, 0x00000427, 0x00000000);
2287 nv_icmd(priv, 0x00000428, 0x00000000);
2288 nv_icmd(priv, 0x00000429, 0x00000000);
2289 nv_icmd(priv, 0x0000042a, 0x00000000);
2290 nv_icmd(priv, 0x0000042b, 0x00000000);
2291 nv_icmd(priv, 0x0000042c, 0x00000000);
2292 nv_icmd(priv, 0x0000042d, 0x00000000);
2293 nv_icmd(priv, 0x0000042e, 0x00000000);
2294 nv_icmd(priv, 0x0000042f, 0x00000000);
2295 nv_icmd(priv, 0x000002b0, 0x00000000);
2296 nv_icmd(priv, 0x000002b1, 0x00000000);
2297 nv_icmd(priv, 0x000002b2, 0x00000000);
2298 nv_icmd(priv, 0x000002b3, 0x00000000);
2299 nv_icmd(priv, 0x000002b4, 0x00000000);
2300 nv_icmd(priv, 0x000002b5, 0x00000000);
2301 nv_icmd(priv, 0x000002b6, 0x00000000);
2302 nv_icmd(priv, 0x000002b7, 0x00000000);
2303 nv_icmd(priv, 0x000002b8, 0x00000000);
2304 nv_icmd(priv, 0x000002b9, 0x00000000);
2305 nv_icmd(priv, 0x000002ba, 0x00000000);
2306 nv_icmd(priv, 0x000002bb, 0x00000000);
2307 nv_icmd(priv, 0x000002bc, 0x00000000);
2308 nv_icmd(priv, 0x000002bd, 0x00000000);
2309 nv_icmd(priv, 0x000002be, 0x00000000);
2310 nv_icmd(priv, 0x000002bf, 0x00000000);
2311 nv_icmd(priv, 0x00000430, 0x00000000);
2312 nv_icmd(priv, 0x00000431, 0x00000000);
2313 nv_icmd(priv, 0x00000432, 0x00000000);
2314 nv_icmd(priv, 0x00000433, 0x00000000);
2315 nv_icmd(priv, 0x00000434, 0x00000000);
2316 nv_icmd(priv, 0x00000435, 0x00000000);
2317 nv_icmd(priv, 0x00000436, 0x00000000);
2318 nv_icmd(priv, 0x00000437, 0x00000000);
2319 nv_icmd(priv, 0x00000438, 0x00000000);
2320 nv_icmd(priv, 0x00000439, 0x00000000);
2321 nv_icmd(priv, 0x0000043a, 0x00000000);
2322 nv_icmd(priv, 0x0000043b, 0x00000000);
2323 nv_icmd(priv, 0x0000043c, 0x00000000);
2324 nv_icmd(priv, 0x0000043d, 0x00000000);
2325 nv_icmd(priv, 0x0000043e, 0x00000000);
2326 nv_icmd(priv, 0x0000043f, 0x00000000);
2327 nv_icmd(priv, 0x000002c0, 0x00000000);
2328 nv_icmd(priv, 0x000002c1, 0x00000000);
2329 nv_icmd(priv, 0x000002c2, 0x00000000);
2330 nv_icmd(priv, 0x000002c3, 0x00000000);
2331 nv_icmd(priv, 0x000002c4, 0x00000000);
2332 nv_icmd(priv, 0x000002c5, 0x00000000);
2333 nv_icmd(priv, 0x000002c6, 0x00000000);
2334 nv_icmd(priv, 0x000002c7, 0x00000000);
2335 nv_icmd(priv, 0x000002c8, 0x00000000);
2336 nv_icmd(priv, 0x000002c9, 0x00000000);
2337 nv_icmd(priv, 0x000002ca, 0x00000000);
2338 nv_icmd(priv, 0x000002cb, 0x00000000);
2339 nv_icmd(priv, 0x000002cc, 0x00000000);
2340 nv_icmd(priv, 0x000002cd, 0x00000000);
2341 nv_icmd(priv, 0x000002ce, 0x00000000);
2342 nv_icmd(priv, 0x000002cf, 0x00000000);
2343 nv_icmd(priv, 0x000004d0, 0x00000000);
2344 nv_icmd(priv, 0x000004d1, 0x00000000);
2345 nv_icmd(priv, 0x000004d2, 0x00000000);
2346 nv_icmd(priv, 0x000004d3, 0x00000000);
2347 nv_icmd(priv, 0x000004d4, 0x00000000);
2348 nv_icmd(priv, 0x000004d5, 0x00000000);
2349 nv_icmd(priv, 0x000004d6, 0x00000000);
2350 nv_icmd(priv, 0x000004d7, 0x00000000);
2351 nv_icmd(priv, 0x000004d8, 0x00000000);
2352 nv_icmd(priv, 0x000004d9, 0x00000000);
2353 nv_icmd(priv, 0x000004da, 0x00000000);
2354 nv_icmd(priv, 0x000004db, 0x00000000);
2355 nv_icmd(priv, 0x000004dc, 0x00000000);
2356 nv_icmd(priv, 0x000004dd, 0x00000000);
2357 nv_icmd(priv, 0x000004de, 0x00000000);
2358 nv_icmd(priv, 0x000004df, 0x00000000);
2359 nv_icmd(priv, 0x00000720, 0x00000000);
2360 nv_icmd(priv, 0x00000721, 0x00000000);
2361 nv_icmd(priv, 0x00000722, 0x00000000);
2362 nv_icmd(priv, 0x00000723, 0x00000000);
2363 nv_icmd(priv, 0x00000724, 0x00000000);
2364 nv_icmd(priv, 0x00000725, 0x00000000);
2365 nv_icmd(priv, 0x00000726, 0x00000000);
2366 nv_icmd(priv, 0x00000727, 0x00000000);
2367 nv_icmd(priv, 0x00000728, 0x00000000);
2368 nv_icmd(priv, 0x00000729, 0x00000000);
2369 nv_icmd(priv, 0x0000072a, 0x00000000);
2370 nv_icmd(priv, 0x0000072b, 0x00000000);
2371 nv_icmd(priv, 0x0000072c, 0x00000000);
2372 nv_icmd(priv, 0x0000072d, 0x00000000);
2373 nv_icmd(priv, 0x0000072e, 0x00000000);
2374 nv_icmd(priv, 0x0000072f, 0x00000000);
2375 nv_icmd(priv, 0x000008c0, 0x00000000);
2376 nv_icmd(priv, 0x000008c1, 0x00000000);
2377 nv_icmd(priv, 0x000008c2, 0x00000000);
2378 nv_icmd(priv, 0x000008c3, 0x00000000);
2379 nv_icmd(priv, 0x000008c4, 0x00000000);
2380 nv_icmd(priv, 0x000008c5, 0x00000000);
2381 nv_icmd(priv, 0x000008c6, 0x00000000);
2382 nv_icmd(priv, 0x000008c7, 0x00000000);
2383 nv_icmd(priv, 0x000008c8, 0x00000000);
2384 nv_icmd(priv, 0x000008c9, 0x00000000);
2385 nv_icmd(priv, 0x000008ca, 0x00000000);
2386 nv_icmd(priv, 0x000008cb, 0x00000000);
2387 nv_icmd(priv, 0x000008cc, 0x00000000);
2388 nv_icmd(priv, 0x000008cd, 0x00000000);
2389 nv_icmd(priv, 0x000008ce, 0x00000000);
2390 nv_icmd(priv, 0x000008cf, 0x00000000);
2391 nv_icmd(priv, 0x00000890, 0x00000000);
2392 nv_icmd(priv, 0x00000891, 0x00000000);
2393 nv_icmd(priv, 0x00000892, 0x00000000);
2394 nv_icmd(priv, 0x00000893, 0x00000000);
2395 nv_icmd(priv, 0x00000894, 0x00000000);
2396 nv_icmd(priv, 0x00000895, 0x00000000);
2397 nv_icmd(priv, 0x00000896, 0x00000000);
2398 nv_icmd(priv, 0x00000897, 0x00000000);
2399 nv_icmd(priv, 0x00000898, 0x00000000);
2400 nv_icmd(priv, 0x00000899, 0x00000000);
2401 nv_icmd(priv, 0x0000089a, 0x00000000);
2402 nv_icmd(priv, 0x0000089b, 0x00000000);
2403 nv_icmd(priv, 0x0000089c, 0x00000000);
2404 nv_icmd(priv, 0x0000089d, 0x00000000);
2405 nv_icmd(priv, 0x0000089e, 0x00000000);
2406 nv_icmd(priv, 0x0000089f, 0x00000000);
2407 nv_icmd(priv, 0x000008e0, 0x00000000);
2408 nv_icmd(priv, 0x000008e1, 0x00000000);
2409 nv_icmd(priv, 0x000008e2, 0x00000000);
2410 nv_icmd(priv, 0x000008e3, 0x00000000);
2411 nv_icmd(priv, 0x000008e4, 0x00000000);
2412 nv_icmd(priv, 0x000008e5, 0x00000000);
2413 nv_icmd(priv, 0x000008e6, 0x00000000);
2414 nv_icmd(priv, 0x000008e7, 0x00000000);
2415 nv_icmd(priv, 0x000008e8, 0x00000000);
2416 nv_icmd(priv, 0x000008e9, 0x00000000);
2417 nv_icmd(priv, 0x000008ea, 0x00000000);
2418 nv_icmd(priv, 0x000008eb, 0x00000000);
2419 nv_icmd(priv, 0x000008ec, 0x00000000);
2420 nv_icmd(priv, 0x000008ed, 0x00000000);
2421 nv_icmd(priv, 0x000008ee, 0x00000000);
2422 nv_icmd(priv, 0x000008ef, 0x00000000);
2423 nv_icmd(priv, 0x000008a0, 0x00000000);
2424 nv_icmd(priv, 0x000008a1, 0x00000000);
2425 nv_icmd(priv, 0x000008a2, 0x00000000);
2426 nv_icmd(priv, 0x000008a3, 0x00000000);
2427 nv_icmd(priv, 0x000008a4, 0x00000000);
2428 nv_icmd(priv, 0x000008a5, 0x00000000);
2429 nv_icmd(priv, 0x000008a6, 0x00000000);
2430 nv_icmd(priv, 0x000008a7, 0x00000000);
2431 nv_icmd(priv, 0x000008a8, 0x00000000);
2432 nv_icmd(priv, 0x000008a9, 0x00000000);
2433 nv_icmd(priv, 0x000008aa, 0x00000000);
2434 nv_icmd(priv, 0x000008ab, 0x00000000);
2435 nv_icmd(priv, 0x000008ac, 0x00000000);
2436 nv_icmd(priv, 0x000008ad, 0x00000000);
2437 nv_icmd(priv, 0x000008ae, 0x00000000);
2438 nv_icmd(priv, 0x000008af, 0x00000000);
2439 nv_icmd(priv, 0x000008f0, 0x00000000);
2440 nv_icmd(priv, 0x000008f1, 0x00000000);
2441 nv_icmd(priv, 0x000008f2, 0x00000000);
2442 nv_icmd(priv, 0x000008f3, 0x00000000);
2443 nv_icmd(priv, 0x000008f4, 0x00000000);
2444 nv_icmd(priv, 0x000008f5, 0x00000000);
2445 nv_icmd(priv, 0x000008f6, 0x00000000);
2446 nv_icmd(priv, 0x000008f7, 0x00000000);
2447 nv_icmd(priv, 0x000008f8, 0x00000000);
2448 nv_icmd(priv, 0x000008f9, 0x00000000);
2449 nv_icmd(priv, 0x000008fa, 0x00000000);
2450 nv_icmd(priv, 0x000008fb, 0x00000000);
2451 nv_icmd(priv, 0x000008fc, 0x00000000);
2452 nv_icmd(priv, 0x000008fd, 0x00000000);
2453 nv_icmd(priv, 0x000008fe, 0x00000000);
2454 nv_icmd(priv, 0x000008ff, 0x00000000);
2455 nv_icmd(priv, 0x0000094c, 0x000000ff);
2456 nv_icmd(priv, 0x0000094d, 0xffffffff);
2457 nv_icmd(priv, 0x0000094e, 0x00000002);
2458 nv_icmd(priv, 0x000002ec, 0x00000001);
2459 nv_icmd(priv, 0x00000303, 0x00000001);
2460 nv_icmd(priv, 0x000002e6, 0x00000001);
2461 nv_icmd(priv, 0x00000466, 0x00000052);
2462 nv_icmd(priv, 0x00000301, 0x3f800000);
2463 nv_icmd(priv, 0x00000304, 0x30201000);
2464 nv_icmd(priv, 0x00000305, 0x70605040);
2465 nv_icmd(priv, 0x00000306, 0xb8a89888);
2466 nv_icmd(priv, 0x00000307, 0xf8e8d8c8);
2467 nv_icmd(priv, 0x0000030a, 0x00ffff00);
2468 nv_icmd(priv, 0x0000030b, 0x0000001a);
2469 nv_icmd(priv, 0x0000030c, 0x00000001);
2470 nv_icmd(priv, 0x00000318, 0x00000001);
2471 nv_icmd(priv, 0x00000340, 0x00000000);
2472 nv_icmd(priv, 0x00000375, 0x00000001);
2473 nv_icmd(priv, 0x00000351, 0x00000100);
2474 nv_icmd(priv, 0x0000037d, 0x00000006);
2475 nv_icmd(priv, 0x000003a0, 0x00000002);
2476 nv_icmd(priv, 0x000003aa, 0x00000001);
2477 nv_icmd(priv, 0x000003a9, 0x00000001);
2478 nv_icmd(priv, 0x00000380, 0x00000001);
2479 nv_icmd(priv, 0x00000360, 0x00000040);
2480 nv_icmd(priv, 0x00000366, 0x00000000);
2481 nv_icmd(priv, 0x00000367, 0x00000000);
2482 nv_icmd(priv, 0x00000368, 0x00001fff);
2483 nv_icmd(priv, 0x00000370, 0x00000000);
2484 nv_icmd(priv, 0x00000371, 0x00000000);
2485 nv_icmd(priv, 0x00000372, 0x003fffff);
2486 nv_icmd(priv, 0x0000037a, 0x00000012);
2487 nv_icmd(priv, 0x000005e0, 0x00000022);
2488 nv_icmd(priv, 0x000005e1, 0x00000022);
2489 nv_icmd(priv, 0x000005e2, 0x00000022);
2490 nv_icmd(priv, 0x000005e3, 0x00000022);
2491 nv_icmd(priv, 0x000005e4, 0x00000022);
2492 nv_icmd(priv, 0x00000619, 0x00000003);
2493 nv_icmd(priv, 0x00000811, 0x00000003);
2494 nv_icmd(priv, 0x00000812, 0x00000004);
2495 nv_icmd(priv, 0x00000813, 0x00000006);
2496 nv_icmd(priv, 0x00000814, 0x00000008);
2497 nv_icmd(priv, 0x00000815, 0x0000000b);
2498 nv_icmd(priv, 0x00000800, 0x00000001);
2499 nv_icmd(priv, 0x00000801, 0x00000001);
2500 nv_icmd(priv, 0x00000802, 0x00000001);
2501 nv_icmd(priv, 0x00000803, 0x00000001);
2502 nv_icmd(priv, 0x00000804, 0x00000001);
2503 nv_icmd(priv, 0x00000805, 0x00000001);
2504 nv_icmd(priv, 0x00000632, 0x00000001);
2505 nv_icmd(priv, 0x00000633, 0x00000002);
2506 nv_icmd(priv, 0x00000634, 0x00000003);
2507 nv_icmd(priv, 0x00000635, 0x00000004);
2508 nv_icmd(priv, 0x00000654, 0x3f800000);
2509 nv_icmd(priv, 0x00000657, 0x3f800000);
2510 nv_icmd(priv, 0x00000655, 0x3f800000);
2511 nv_icmd(priv, 0x00000656, 0x3f800000);
2512 nv_icmd(priv, 0x000006cd, 0x3f800000);
2513 nv_icmd(priv, 0x000007f5, 0x3f800000);
2514 nv_icmd(priv, 0x000007dc, 0x39291909);
2515 nv_icmd(priv, 0x000007dd, 0x79695949);
2516 nv_icmd(priv, 0x000007de, 0xb9a99989);
2517 nv_icmd(priv, 0x000007df, 0xf9e9d9c9);
2518 nv_icmd(priv, 0x000007e8, 0x00003210);
2519 nv_icmd(priv, 0x000007e9, 0x00007654);
2520 nv_icmd(priv, 0x000007ea, 0x00000098);
2521 nv_icmd(priv, 0x000007ec, 0x39291909);
2522 nv_icmd(priv, 0x000007ed, 0x79695949);
2523 nv_icmd(priv, 0x000007ee, 0xb9a99989);
2524 nv_icmd(priv, 0x000007ef, 0xf9e9d9c9);
2525 nv_icmd(priv, 0x000007f0, 0x00003210);
2526 nv_icmd(priv, 0x000007f1, 0x00007654);
2527 nv_icmd(priv, 0x000007f2, 0x00000098);
2528 nv_icmd(priv, 0x000005a5, 0x00000001);
2529 nv_icmd(priv, 0x00000980, 0x00000000);
2530 nv_icmd(priv, 0x00000981, 0x00000000);
2531 nv_icmd(priv, 0x00000982, 0x00000000);
2532 nv_icmd(priv, 0x00000983, 0x00000000);
2533 nv_icmd(priv, 0x00000984, 0x00000000);
2534 nv_icmd(priv, 0x00000985, 0x00000000);
2535 nv_icmd(priv, 0x00000986, 0x00000000);
2536 nv_icmd(priv, 0x00000987, 0x00000000);
2537 nv_icmd(priv, 0x00000988, 0x00000000);
2538 nv_icmd(priv, 0x00000989, 0x00000000);
2539 nv_icmd(priv, 0x0000098a, 0x00000000);
2540 nv_icmd(priv, 0x0000098b, 0x00000000);
2541 nv_icmd(priv, 0x0000098c, 0x00000000);
2542 nv_icmd(priv, 0x0000098d, 0x00000000);
2543 nv_icmd(priv, 0x0000098e, 0x00000000);
2544 nv_icmd(priv, 0x0000098f, 0x00000000);
2545 nv_icmd(priv, 0x00000990, 0x00000000);
2546 nv_icmd(priv, 0x00000991, 0x00000000);
2547 nv_icmd(priv, 0x00000992, 0x00000000);
2548 nv_icmd(priv, 0x00000993, 0x00000000);
2549 nv_icmd(priv, 0x00000994, 0x00000000);
2550 nv_icmd(priv, 0x00000995, 0x00000000);
2551 nv_icmd(priv, 0x00000996, 0x00000000);
2552 nv_icmd(priv, 0x00000997, 0x00000000);
2553 nv_icmd(priv, 0x00000998, 0x00000000);
2554 nv_icmd(priv, 0x00000999, 0x00000000);
2555 nv_icmd(priv, 0x0000099a, 0x00000000);
2556 nv_icmd(priv, 0x0000099b, 0x00000000);
2557 nv_icmd(priv, 0x0000099c, 0x00000000);
2558 nv_icmd(priv, 0x0000099d, 0x00000000);
2559 nv_icmd(priv, 0x0000099e, 0x00000000);
2560 nv_icmd(priv, 0x0000099f, 0x00000000);
2561 nv_icmd(priv, 0x000009a0, 0x00000000);
2562 nv_icmd(priv, 0x000009a1, 0x00000000);
2563 nv_icmd(priv, 0x000009a2, 0x00000000);
2564 nv_icmd(priv, 0x000009a3, 0x00000000);
2565 nv_icmd(priv, 0x000009a4, 0x00000000);
2566 nv_icmd(priv, 0x000009a5, 0x00000000);
2567 nv_icmd(priv, 0x000009a6, 0x00000000);
2568 nv_icmd(priv, 0x000009a7, 0x00000000);
2569 nv_icmd(priv, 0x000009a8, 0x00000000);
2570 nv_icmd(priv, 0x000009a9, 0x00000000);
2571 nv_icmd(priv, 0x000009aa, 0x00000000);
2572 nv_icmd(priv, 0x000009ab, 0x00000000);
2573 nv_icmd(priv, 0x000009ac, 0x00000000);
2574 nv_icmd(priv, 0x000009ad, 0x00000000);
2575 nv_icmd(priv, 0x000009ae, 0x00000000);
2576 nv_icmd(priv, 0x000009af, 0x00000000);
2577 nv_icmd(priv, 0x000009b0, 0x00000000);
2578 nv_icmd(priv, 0x000009b1, 0x00000000);
2579 nv_icmd(priv, 0x000009b2, 0x00000000);
2580 nv_icmd(priv, 0x000009b3, 0x00000000);
2581 nv_icmd(priv, 0x000009b4, 0x00000000);
2582 nv_icmd(priv, 0x000009b5, 0x00000000);
2583 nv_icmd(priv, 0x000009b6, 0x00000000);
2584 nv_icmd(priv, 0x000009b7, 0x00000000);
2585 nv_icmd(priv, 0x000009b8, 0x00000000);
2586 nv_icmd(priv, 0x000009b9, 0x00000000);
2587 nv_icmd(priv, 0x000009ba, 0x00000000);
2588 nv_icmd(priv, 0x000009bb, 0x00000000);
2589 nv_icmd(priv, 0x000009bc, 0x00000000);
2590 nv_icmd(priv, 0x000009bd, 0x00000000);
2591 nv_icmd(priv, 0x000009be, 0x00000000);
2592 nv_icmd(priv, 0x000009bf, 0x00000000);
2593 nv_icmd(priv, 0x000009c0, 0x00000000);
2594 nv_icmd(priv, 0x000009c1, 0x00000000);
2595 nv_icmd(priv, 0x000009c2, 0x00000000);
2596 nv_icmd(priv, 0x000009c3, 0x00000000);
2597 nv_icmd(priv, 0x000009c4, 0x00000000);
2598 nv_icmd(priv, 0x000009c5, 0x00000000);
2599 nv_icmd(priv, 0x000009c6, 0x00000000);
2600 nv_icmd(priv, 0x000009c7, 0x00000000);
2601 nv_icmd(priv, 0x000009c8, 0x00000000);
2602 nv_icmd(priv, 0x000009c9, 0x00000000);
2603 nv_icmd(priv, 0x000009ca, 0x00000000);
2604 nv_icmd(priv, 0x000009cb, 0x00000000);
2605 nv_icmd(priv, 0x000009cc, 0x00000000);
2606 nv_icmd(priv, 0x000009cd, 0x00000000);
2607 nv_icmd(priv, 0x000009ce, 0x00000000);
2608 nv_icmd(priv, 0x000009cf, 0x00000000);
2609 nv_icmd(priv, 0x000009d0, 0x00000000);
2610 nv_icmd(priv, 0x000009d1, 0x00000000);
2611 nv_icmd(priv, 0x000009d2, 0x00000000);
2612 nv_icmd(priv, 0x000009d3, 0x00000000);
2613 nv_icmd(priv, 0x000009d4, 0x00000000);
2614 nv_icmd(priv, 0x000009d5, 0x00000000);
2615 nv_icmd(priv, 0x000009d6, 0x00000000);
2616 nv_icmd(priv, 0x000009d7, 0x00000000);
2617 nv_icmd(priv, 0x000009d8, 0x00000000);
2618 nv_icmd(priv, 0x000009d9, 0x00000000);
2619 nv_icmd(priv, 0x000009da, 0x00000000);
2620 nv_icmd(priv, 0x000009db, 0x00000000);
2621 nv_icmd(priv, 0x000009dc, 0x00000000);
2622 nv_icmd(priv, 0x000009dd, 0x00000000);
2623 nv_icmd(priv, 0x000009de, 0x00000000);
2624 nv_icmd(priv, 0x000009df, 0x00000000);
2625 nv_icmd(priv, 0x000009e0, 0x00000000);
2626 nv_icmd(priv, 0x000009e1, 0x00000000);
2627 nv_icmd(priv, 0x000009e2, 0x00000000);
2628 nv_icmd(priv, 0x000009e3, 0x00000000);
2629 nv_icmd(priv, 0x000009e4, 0x00000000);
2630 nv_icmd(priv, 0x000009e5, 0x00000000);
2631 nv_icmd(priv, 0x000009e6, 0x00000000);
2632 nv_icmd(priv, 0x000009e7, 0x00000000);
2633 nv_icmd(priv, 0x000009e8, 0x00000000);
2634 nv_icmd(priv, 0x000009e9, 0x00000000);
2635 nv_icmd(priv, 0x000009ea, 0x00000000);
2636 nv_icmd(priv, 0x000009eb, 0x00000000);
2637 nv_icmd(priv, 0x000009ec, 0x00000000);
2638 nv_icmd(priv, 0x000009ed, 0x00000000);
2639 nv_icmd(priv, 0x000009ee, 0x00000000);
2640 nv_icmd(priv, 0x000009ef, 0x00000000);
2641 nv_icmd(priv, 0x000009f0, 0x00000000);
2642 nv_icmd(priv, 0x000009f1, 0x00000000);
2643 nv_icmd(priv, 0x000009f2, 0x00000000);
2644 nv_icmd(priv, 0x000009f3, 0x00000000);
2645 nv_icmd(priv, 0x000009f4, 0x00000000);
2646 nv_icmd(priv, 0x000009f5, 0x00000000);
2647 nv_icmd(priv, 0x000009f6, 0x00000000);
2648 nv_icmd(priv, 0x000009f7, 0x00000000);
2649 nv_icmd(priv, 0x000009f8, 0x00000000);
2650 nv_icmd(priv, 0x000009f9, 0x00000000);
2651 nv_icmd(priv, 0x000009fa, 0x00000000);
2652 nv_icmd(priv, 0x000009fb, 0x00000000);
2653 nv_icmd(priv, 0x000009fc, 0x00000000);
2654 nv_icmd(priv, 0x000009fd, 0x00000000);
2655 nv_icmd(priv, 0x000009fe, 0x00000000);
2656 nv_icmd(priv, 0x000009ff, 0x00000000);
2657 nv_icmd(priv, 0x00000468, 0x00000004);
2658 nv_icmd(priv, 0x0000046c, 0x00000001);
2659 nv_icmd(priv, 0x00000470, 0x00000000);
2660 nv_icmd(priv, 0x00000471, 0x00000000);
2661 nv_icmd(priv, 0x00000472, 0x00000000);
2662 nv_icmd(priv, 0x00000473, 0x00000000);
2663 nv_icmd(priv, 0x00000474, 0x00000000);
2664 nv_icmd(priv, 0x00000475, 0x00000000);
2665 nv_icmd(priv, 0x00000476, 0x00000000);
2666 nv_icmd(priv, 0x00000477, 0x00000000);
2667 nv_icmd(priv, 0x00000478, 0x00000000);
2668 nv_icmd(priv, 0x00000479, 0x00000000);
2669 nv_icmd(priv, 0x0000047a, 0x00000000);
2670 nv_icmd(priv, 0x0000047b, 0x00000000);
2671 nv_icmd(priv, 0x0000047c, 0x00000000);
2672 nv_icmd(priv, 0x0000047d, 0x00000000);
2673 nv_icmd(priv, 0x0000047e, 0x00000000);
2674 nv_icmd(priv, 0x0000047f, 0x00000000);
2675 nv_icmd(priv, 0x00000480, 0x00000000);
2676 nv_icmd(priv, 0x00000481, 0x00000000);
2677 nv_icmd(priv, 0x00000482, 0x00000000);
2678 nv_icmd(priv, 0x00000483, 0x00000000);
2679 nv_icmd(priv, 0x00000484, 0x00000000);
2680 nv_icmd(priv, 0x00000485, 0x00000000);
2681 nv_icmd(priv, 0x00000486, 0x00000000);
2682 nv_icmd(priv, 0x00000487, 0x00000000);
2683 nv_icmd(priv, 0x00000488, 0x00000000);
2684 nv_icmd(priv, 0x00000489, 0x00000000);
2685 nv_icmd(priv, 0x0000048a, 0x00000000);
2686 nv_icmd(priv, 0x0000048b, 0x00000000);
2687 nv_icmd(priv, 0x0000048c, 0x00000000);
2688 nv_icmd(priv, 0x0000048d, 0x00000000);
2689 nv_icmd(priv, 0x0000048e, 0x00000000);
2690 nv_icmd(priv, 0x0000048f, 0x00000000);
2691 nv_icmd(priv, 0x00000490, 0x00000000);
2692 nv_icmd(priv, 0x00000491, 0x00000000);
2693 nv_icmd(priv, 0x00000492, 0x00000000);
2694 nv_icmd(priv, 0x00000493, 0x00000000);
2695 nv_icmd(priv, 0x00000494, 0x00000000);
2696 nv_icmd(priv, 0x00000495, 0x00000000);
2697 nv_icmd(priv, 0x00000496, 0x00000000);
2698 nv_icmd(priv, 0x00000497, 0x00000000);
2699 nv_icmd(priv, 0x00000498, 0x00000000);
2700 nv_icmd(priv, 0x00000499, 0x00000000);
2701 nv_icmd(priv, 0x0000049a, 0x00000000);
2702 nv_icmd(priv, 0x0000049b, 0x00000000);
2703 nv_icmd(priv, 0x0000049c, 0x00000000);
2704 nv_icmd(priv, 0x0000049d, 0x00000000);
2705 nv_icmd(priv, 0x0000049e, 0x00000000);
2706 nv_icmd(priv, 0x0000049f, 0x00000000);
2707 nv_icmd(priv, 0x000004a0, 0x00000000);
2708 nv_icmd(priv, 0x000004a1, 0x00000000);
2709 nv_icmd(priv, 0x000004a2, 0x00000000);
2710 nv_icmd(priv, 0x000004a3, 0x00000000);
2711 nv_icmd(priv, 0x000004a4, 0x00000000);
2712 nv_icmd(priv, 0x000004a5, 0x00000000);
2713 nv_icmd(priv, 0x000004a6, 0x00000000);
2714 nv_icmd(priv, 0x000004a7, 0x00000000);
2715 nv_icmd(priv, 0x000004a8, 0x00000000);
2716 nv_icmd(priv, 0x000004a9, 0x00000000);
2717 nv_icmd(priv, 0x000004aa, 0x00000000);
2718 nv_icmd(priv, 0x000004ab, 0x00000000);
2719 nv_icmd(priv, 0x000004ac, 0x00000000);
2720 nv_icmd(priv, 0x000004ad, 0x00000000);
2721 nv_icmd(priv, 0x000004ae, 0x00000000);
2722 nv_icmd(priv, 0x000004af, 0x00000000);
2723 nv_icmd(priv, 0x000004b0, 0x00000000);
2724 nv_icmd(priv, 0x000004b1, 0x00000000);
2725 nv_icmd(priv, 0x000004b2, 0x00000000);
2726 nv_icmd(priv, 0x000004b3, 0x00000000);
2727 nv_icmd(priv, 0x000004b4, 0x00000000);
2728 nv_icmd(priv, 0x000004b5, 0x00000000);
2729 nv_icmd(priv, 0x000004b6, 0x00000000);
2730 nv_icmd(priv, 0x000004b7, 0x00000000);
2731 nv_icmd(priv, 0x000004b8, 0x00000000);
2732 nv_icmd(priv, 0x000004b9, 0x00000000);
2733 nv_icmd(priv, 0x000004ba, 0x00000000);
2734 nv_icmd(priv, 0x000004bb, 0x00000000);
2735 nv_icmd(priv, 0x000004bc, 0x00000000);
2736 nv_icmd(priv, 0x000004bd, 0x00000000);
2737 nv_icmd(priv, 0x000004be, 0x00000000);
2738 nv_icmd(priv, 0x000004bf, 0x00000000);
2739 nv_icmd(priv, 0x000004c0, 0x00000000);
2740 nv_icmd(priv, 0x000004c1, 0x00000000);
2741 nv_icmd(priv, 0x000004c2, 0x00000000);
2742 nv_icmd(priv, 0x000004c3, 0x00000000);
2743 nv_icmd(priv, 0x000004c4, 0x00000000);
2744 nv_icmd(priv, 0x000004c5, 0x00000000);
2745 nv_icmd(priv, 0x000004c6, 0x00000000);
2746 nv_icmd(priv, 0x000004c7, 0x00000000);
2747 nv_icmd(priv, 0x000004c8, 0x00000000);
2748 nv_icmd(priv, 0x000004c9, 0x00000000);
2749 nv_icmd(priv, 0x000004ca, 0x00000000);
2750 nv_icmd(priv, 0x000004cb, 0x00000000);
2751 nv_icmd(priv, 0x000004cc, 0x00000000);
2752 nv_icmd(priv, 0x000004cd, 0x00000000);
2753 nv_icmd(priv, 0x000004ce, 0x00000000);
2754 nv_icmd(priv, 0x000004cf, 0x00000000);
2755 nv_icmd(priv, 0x00000510, 0x3f800000);
2756 nv_icmd(priv, 0x00000511, 0x3f800000);
2757 nv_icmd(priv, 0x00000512, 0x3f800000);
2758 nv_icmd(priv, 0x00000513, 0x3f800000);
2759 nv_icmd(priv, 0x00000514, 0x3f800000);
2760 nv_icmd(priv, 0x00000515, 0x3f800000);
2761 nv_icmd(priv, 0x00000516, 0x3f800000);
2762 nv_icmd(priv, 0x00000517, 0x3f800000);
2763 nv_icmd(priv, 0x00000518, 0x3f800000);
2764 nv_icmd(priv, 0x00000519, 0x3f800000);
2765 nv_icmd(priv, 0x0000051a, 0x3f800000);
2766 nv_icmd(priv, 0x0000051b, 0x3f800000);
2767 nv_icmd(priv, 0x0000051c, 0x3f800000);
2768 nv_icmd(priv, 0x0000051d, 0x3f800000);
2769 nv_icmd(priv, 0x0000051e, 0x3f800000);
2770 nv_icmd(priv, 0x0000051f, 0x3f800000);
2771 nv_icmd(priv, 0x00000520, 0x000002b6);
2772 nv_icmd(priv, 0x00000529, 0x00000001);
2773 nv_icmd(priv, 0x00000530, 0xffff0000);
2774 nv_icmd(priv, 0x00000531, 0xffff0000);
2775 nv_icmd(priv, 0x00000532, 0xffff0000);
2776 nv_icmd(priv, 0x00000533, 0xffff0000);
2777 nv_icmd(priv, 0x00000534, 0xffff0000);
2778 nv_icmd(priv, 0x00000535, 0xffff0000);
2779 nv_icmd(priv, 0x00000536, 0xffff0000);
2780 nv_icmd(priv, 0x00000537, 0xffff0000);
2781 nv_icmd(priv, 0x00000538, 0xffff0000);
2782 nv_icmd(priv, 0x00000539, 0xffff0000);
2783 nv_icmd(priv, 0x0000053a, 0xffff0000);
2784 nv_icmd(priv, 0x0000053b, 0xffff0000);
2785 nv_icmd(priv, 0x0000053c, 0xffff0000);
2786 nv_icmd(priv, 0x0000053d, 0xffff0000);
2787 nv_icmd(priv, 0x0000053e, 0xffff0000);
2788 nv_icmd(priv, 0x0000053f, 0xffff0000);
2789 nv_icmd(priv, 0x00000585, 0x0000003f);
2790 nv_icmd(priv, 0x00000576, 0x00000003);
2791 if (nv_device(priv)->chipset == 0xc1 ||
2792 nv_device(priv)->chipset == 0xd9)
2793 nv_icmd(priv, 0x0000057b, 0x00000059);
2794 nv_icmd(priv, 0x00000586, 0x00000040);
2795 nv_icmd(priv, 0x00000582, 0x00000080);
2796 nv_icmd(priv, 0x00000583, 0x00000080);
2797 nv_icmd(priv, 0x000005c2, 0x00000001);
2798 nv_icmd(priv, 0x00000638, 0x00000001);
2799 nv_icmd(priv, 0x00000639, 0x00000001);
2800 nv_icmd(priv, 0x0000063a, 0x00000002);
2801 nv_icmd(priv, 0x0000063b, 0x00000001);
2802 nv_icmd(priv, 0x0000063c, 0x00000001);
2803 nv_icmd(priv, 0x0000063d, 0x00000002);
2804 nv_icmd(priv, 0x0000063e, 0x00000001);
2805 nv_icmd(priv, 0x000008b8, 0x00000001);
2806 nv_icmd(priv, 0x000008b9, 0x00000001);
2807 nv_icmd(priv, 0x000008ba, 0x00000001);
2808 nv_icmd(priv, 0x000008bb, 0x00000001);
2809 nv_icmd(priv, 0x000008bc, 0x00000001);
2810 nv_icmd(priv, 0x000008bd, 0x00000001);
2811 nv_icmd(priv, 0x000008be, 0x00000001);
2812 nv_icmd(priv, 0x000008bf, 0x00000001);
2813 nv_icmd(priv, 0x00000900, 0x00000001);
2814 nv_icmd(priv, 0x00000901, 0x00000001);
2815 nv_icmd(priv, 0x00000902, 0x00000001);
2816 nv_icmd(priv, 0x00000903, 0x00000001);
2817 nv_icmd(priv, 0x00000904, 0x00000001);
2818 nv_icmd(priv, 0x00000905, 0x00000001);
2819 nv_icmd(priv, 0x00000906, 0x00000001);
2820 nv_icmd(priv, 0x00000907, 0x00000001);
2821 nv_icmd(priv, 0x00000908, 0x00000002);
2822 nv_icmd(priv, 0x00000909, 0x00000002);
2823 nv_icmd(priv, 0x0000090a, 0x00000002);
2824 nv_icmd(priv, 0x0000090b, 0x00000002);
2825 nv_icmd(priv, 0x0000090c, 0x00000002);
2826 nv_icmd(priv, 0x0000090d, 0x00000002);
2827 nv_icmd(priv, 0x0000090e, 0x00000002);
2828 nv_icmd(priv, 0x0000090f, 0x00000002);
2829 nv_icmd(priv, 0x00000910, 0x00000001);
2830 nv_icmd(priv, 0x00000911, 0x00000001);
2831 nv_icmd(priv, 0x00000912, 0x00000001);
2832 nv_icmd(priv, 0x00000913, 0x00000001);
2833 nv_icmd(priv, 0x00000914, 0x00000001);
2834 nv_icmd(priv, 0x00000915, 0x00000001);
2835 nv_icmd(priv, 0x00000916, 0x00000001);
2836 nv_icmd(priv, 0x00000917, 0x00000001);
2837 nv_icmd(priv, 0x00000918, 0x00000001);
2838 nv_icmd(priv, 0x00000919, 0x00000001);
2839 nv_icmd(priv, 0x0000091a, 0x00000001);
2840 nv_icmd(priv, 0x0000091b, 0x00000001);
2841 nv_icmd(priv, 0x0000091c, 0x00000001);
2842 nv_icmd(priv, 0x0000091d, 0x00000001);
2843 nv_icmd(priv, 0x0000091e, 0x00000001);
2844 nv_icmd(priv, 0x0000091f, 0x00000001);
2845 nv_icmd(priv, 0x00000920, 0x00000002);
2846 nv_icmd(priv, 0x00000921, 0x00000002);
2847 nv_icmd(priv, 0x00000922, 0x00000002);
2848 nv_icmd(priv, 0x00000923, 0x00000002);
2849 nv_icmd(priv, 0x00000924, 0x00000002);
2850 nv_icmd(priv, 0x00000925, 0x00000002);
2851 nv_icmd(priv, 0x00000926, 0x00000002);
2852 nv_icmd(priv, 0x00000927, 0x00000002);
2853 nv_icmd(priv, 0x00000928, 0x00000001);
2854 nv_icmd(priv, 0x00000929, 0x00000001);
2855 nv_icmd(priv, 0x0000092a, 0x00000001);
2856 nv_icmd(priv, 0x0000092b, 0x00000001);
2857 nv_icmd(priv, 0x0000092c, 0x00000001);
2858 nv_icmd(priv, 0x0000092d, 0x00000001);
2859 nv_icmd(priv, 0x0000092e, 0x00000001);
2860 nv_icmd(priv, 0x0000092f, 0x00000001);
2861 nv_icmd(priv, 0x00000648, 0x00000001);
2862 nv_icmd(priv, 0x00000649, 0x00000001);
2863 nv_icmd(priv, 0x0000064a, 0x00000001);
2864 nv_icmd(priv, 0x0000064b, 0x00000001);
2865 nv_icmd(priv, 0x0000064c, 0x00000001);
2866 nv_icmd(priv, 0x0000064d, 0x00000001);
2867 nv_icmd(priv, 0x0000064e, 0x00000001);
2868 nv_icmd(priv, 0x0000064f, 0x00000001);
2869 nv_icmd(priv, 0x00000650, 0x00000001);
2870 nv_icmd(priv, 0x00000658, 0x0000000f);
2871 nv_icmd(priv, 0x000007ff, 0x0000000a);
2872 nv_icmd(priv, 0x0000066a, 0x40000000);
2873 nv_icmd(priv, 0x0000066b, 0x10000000);
2874 nv_icmd(priv, 0x0000066c, 0xffff0000);
2875 nv_icmd(priv, 0x0000066d, 0xffff0000);
2876 nv_icmd(priv, 0x000007af, 0x00000008);
2877 nv_icmd(priv, 0x000007b0, 0x00000008);
2878 nv_icmd(priv, 0x000007f6, 0x00000001);
2879 nv_icmd(priv, 0x000006b2, 0x00000055);
2880 nv_icmd(priv, 0x000007ad, 0x00000003);
2881 nv_icmd(priv, 0x00000937, 0x00000001);
2882 nv_icmd(priv, 0x00000971, 0x00000008);
2883 nv_icmd(priv, 0x00000972, 0x00000040);
2884 nv_icmd(priv, 0x00000973, 0x0000012c);
2885 nv_icmd(priv, 0x0000097c, 0x00000040);
2886 nv_icmd(priv, 0x00000979, 0x00000003);
2887 nv_icmd(priv, 0x00000975, 0x00000020);
2888 nv_icmd(priv, 0x00000976, 0x00000001);
2889 nv_icmd(priv, 0x00000977, 0x00000020);
2890 nv_icmd(priv, 0x00000978, 0x00000001);
2891 nv_icmd(priv, 0x00000957, 0x00000003);
2892 nv_icmd(priv, 0x0000095e, 0x20164010);
2893 nv_icmd(priv, 0x0000095f, 0x00000020);
2894 if (nv_device(priv)->chipset == 0xd9)
2895 nv_icmd(priv, 0x0000097d, 0x00000020);
2896 nv_icmd(priv, 0x00000683, 0x00000006);
2897 nv_icmd(priv, 0x00000685, 0x003fffff);
2898 nv_icmd(priv, 0x00000687, 0x00000c48);
2899 nv_icmd(priv, 0x000006a0, 0x00000005);
2900 nv_icmd(priv, 0x00000840, 0x00300008);
2901 nv_icmd(priv, 0x00000841, 0x04000080);
2902 nv_icmd(priv, 0x00000842, 0x00300008);
2903 nv_icmd(priv, 0x00000843, 0x04000080);
2904 nv_icmd(priv, 0x00000818, 0x00000000);
2905 nv_icmd(priv, 0x00000819, 0x00000000);
2906 nv_icmd(priv, 0x0000081a, 0x00000000);
2907 nv_icmd(priv, 0x0000081b, 0x00000000);
2908 nv_icmd(priv, 0x0000081c, 0x00000000);
2909 nv_icmd(priv, 0x0000081d, 0x00000000);
2910 nv_icmd(priv, 0x0000081e, 0x00000000);
2911 nv_icmd(priv, 0x0000081f, 0x00000000);
2912 nv_icmd(priv, 0x00000848, 0x00000000);
2913 nv_icmd(priv, 0x00000849, 0x00000000);
2914 nv_icmd(priv, 0x0000084a, 0x00000000);
2915 nv_icmd(priv, 0x0000084b, 0x00000000);
2916 nv_icmd(priv, 0x0000084c, 0x00000000);
2917 nv_icmd(priv, 0x0000084d, 0x00000000);
2918 nv_icmd(priv, 0x0000084e, 0x00000000);
2919 nv_icmd(priv, 0x0000084f, 0x00000000);
2920 nv_icmd(priv, 0x00000850, 0x00000000);
2921 nv_icmd(priv, 0x00000851, 0x00000000);
2922 nv_icmd(priv, 0x00000852, 0x00000000);
2923 nv_icmd(priv, 0x00000853, 0x00000000);
2924 nv_icmd(priv, 0x00000854, 0x00000000);
2925 nv_icmd(priv, 0x00000855, 0x00000000);
2926 nv_icmd(priv, 0x00000856, 0x00000000);
2927 nv_icmd(priv, 0x00000857, 0x00000000);
2928 nv_icmd(priv, 0x00000738, 0x00000000);
2929 nv_icmd(priv, 0x000006aa, 0x00000001);
2930 nv_icmd(priv, 0x000006ab, 0x00000002);
2931 nv_icmd(priv, 0x000006ac, 0x00000080);
2932 nv_icmd(priv, 0x000006ad, 0x00000100);
2933 nv_icmd(priv, 0x000006ae, 0x00000100);
2934 nv_icmd(priv, 0x000006b1, 0x00000011);
2935 nv_icmd(priv, 0x000006bb, 0x000000cf);
2936 nv_icmd(priv, 0x000006ce, 0x2a712488);
2937 nv_icmd(priv, 0x00000739, 0x4085c000);
2938 nv_icmd(priv, 0x0000073a, 0x00000080);
2939 nv_icmd(priv, 0x00000786, 0x80000100);
2940 nv_icmd(priv, 0x0000073c, 0x00010100);
2941 nv_icmd(priv, 0x0000073d, 0x02800000);
2942 nv_icmd(priv, 0x00000787, 0x000000cf);
2943 nv_icmd(priv, 0x0000078c, 0x00000008);
2944 nv_icmd(priv, 0x00000792, 0x00000001);
2945 nv_icmd(priv, 0x00000794, 0x00000001);
2946 nv_icmd(priv, 0x00000795, 0x00000001);
2947 nv_icmd(priv, 0x00000796, 0x00000001);
2948 nv_icmd(priv, 0x00000797, 0x000000cf);
2949 nv_icmd(priv, 0x00000836, 0x00000001);
2950 nv_icmd(priv, 0x0000079a, 0x00000002);
2951 nv_icmd(priv, 0x00000833, 0x04444480);
2952 nv_icmd(priv, 0x000007a1, 0x00000001);
2953 nv_icmd(priv, 0x000007a3, 0x00000001);
2954 nv_icmd(priv, 0x000007a4, 0x00000001);
2955 nv_icmd(priv, 0x000007a5, 0x00000001);
2956 nv_icmd(priv, 0x00000831, 0x00000004);
2957 nv_icmd(priv, 0x0000080c, 0x00000002);
2958 nv_icmd(priv, 0x0000080d, 0x00000100);
2959 nv_icmd(priv, 0x0000080e, 0x00000100);
2960 nv_icmd(priv, 0x0000080f, 0x00000001);
2961 nv_icmd(priv, 0x00000823, 0x00000002);
2962 nv_icmd(priv, 0x00000824, 0x00000100);
2963 nv_icmd(priv, 0x00000825, 0x00000100);
2964 nv_icmd(priv, 0x00000826, 0x00000001);
2965 nv_icmd(priv, 0x0000095d, 0x00000001);
2966 nv_icmd(priv, 0x0000082b, 0x00000004);
2967 nv_icmd(priv, 0x00000942, 0x00010001);
2968 nv_icmd(priv, 0x00000943, 0x00000001);
2969 nv_icmd(priv, 0x00000944, 0x00000022);
2970 nv_icmd(priv, 0x000007c5, 0x00010001);
2971 nv_icmd(priv, 0x00000834, 0x00000001);
2972 nv_icmd(priv, 0x000007c7, 0x00000001);
2973 nv_icmd(priv, 0x0000c1b0, 0x0000000f);
2974 nv_icmd(priv, 0x0000c1b1, 0x0000000f);
2975 nv_icmd(priv, 0x0000c1b2, 0x0000000f);
2976 nv_icmd(priv, 0x0000c1b3, 0x0000000f);
2977 nv_icmd(priv, 0x0000c1b4, 0x0000000f);
2978 nv_icmd(priv, 0x0000c1b5, 0x0000000f);
2979 nv_icmd(priv, 0x0000c1b6, 0x0000000f);
2980 nv_icmd(priv, 0x0000c1b7, 0x0000000f);
2981 nv_icmd(priv, 0x0000c1b8, 0x0fac6881);
2982 nv_icmd(priv, 0x0000c1b9, 0x00fac688);
2983 nv_icmd(priv, 0x0001e100, 0x00000001);
2984 nv_icmd(priv, 0x00001000, 0x00000002);
2985 nv_icmd(priv, 0x000006aa, 0x00000001);
2986 nv_icmd(priv, 0x000006ad, 0x00000100);
2987 nv_icmd(priv, 0x000006ae, 0x00000100);
2988 nv_icmd(priv, 0x000006b1, 0x00000011);
2989 nv_icmd(priv, 0x0000078c, 0x00000008);
2990 nv_icmd(priv, 0x00000792, 0x00000001);
2991 nv_icmd(priv, 0x00000794, 0x00000001);
2992 nv_icmd(priv, 0x00000795, 0x00000001);
2993 nv_icmd(priv, 0x00000796, 0x00000001);
2994 nv_icmd(priv, 0x00000797, 0x000000cf);
2995 nv_icmd(priv, 0x0000079a, 0x00000002);
2996 nv_icmd(priv, 0x00000833, 0x04444480);
2997 nv_icmd(priv, 0x000007a1, 0x00000001);
2998 nv_icmd(priv, 0x000007a3, 0x00000001);
2999 nv_icmd(priv, 0x000007a4, 0x00000001);
3000 nv_icmd(priv, 0x000007a5, 0x00000001);
3001 nv_icmd(priv, 0x00000831, 0x00000004);
3002 nv_icmd(priv, 0x0001e100, 0x00000001);
3003 nv_icmd(priv, 0x00001000, 0x00000014);
3004 nv_icmd(priv, 0x00000351, 0x00000100);
3005 nv_icmd(priv, 0x00000957, 0x00000003);
3006 nv_icmd(priv, 0x0000095d, 0x00000001);
3007 nv_icmd(priv, 0x0000082b, 0x00000004);
3008 nv_icmd(priv, 0x00000942, 0x00010001);
3009 nv_icmd(priv, 0x00000943, 0x00000001);
3010 nv_icmd(priv, 0x000007c5, 0x00010001);
3011 nv_icmd(priv, 0x00000834, 0x00000001);
3012 nv_icmd(priv, 0x000007c7, 0x00000001);
3013 nv_icmd(priv, 0x0001e100, 0x00000001);
3014 nv_icmd(priv, 0x00001000, 0x00000001);
3015 nv_icmd(priv, 0x0000080c, 0x00000002);
3016 nv_icmd(priv, 0x0000080d, 0x00000100);
3017 nv_icmd(priv, 0x0000080e, 0x00000100);
3018 nv_icmd(priv, 0x0000080f, 0x00000001);
3019 nv_icmd(priv, 0x00000823, 0x00000002);
3020 nv_icmd(priv, 0x00000824, 0x00000100);
3021 nv_icmd(priv, 0x00000825, 0x00000100);
3022 nv_icmd(priv, 0x00000826, 0x00000001);
3023 nv_icmd(priv, 0x0001e100, 0x00000001);
3024 nv_wr32(priv, 0x400208, 0x00000000);
3025 nv_wr32(priv, 0x404154, 0x00000400);
3026
3027 nvc0_grctx_generate_9097(priv);
3028 if (fermi >= 0x9197)
3029 nvc0_grctx_generate_9197(priv);
3030 if (fermi >= 0x9297)
3031 nvc0_grctx_generate_9297(priv);
3032 nvc0_grctx_generate_902d(priv);
3033 nvc0_grctx_generate_9039(priv);
3034 nvc0_grctx_generate_90c0(priv);
3035
3036 nv_wr32(priv, 0x000260, r000260);
3037
3038 return nvc0_grctx_fini(&info);
3039}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
new file mode 100644
index 000000000000..6d8c63931ee6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
@@ -0,0 +1,2788 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27static void
28nve0_grctx_generate_icmd(struct nvc0_graph_priv *priv)
29{
30 nv_wr32(priv, 0x400208, 0x80000000);
31 nv_icmd(priv, 0x001000, 0x00000004);
32 nv_icmd(priv, 0x000039, 0x00000000);
33 nv_icmd(priv, 0x00003a, 0x00000000);
34 nv_icmd(priv, 0x00003b, 0x00000000);
35 nv_icmd(priv, 0x0000a9, 0x0000ffff);
36 nv_icmd(priv, 0x000038, 0x0fac6881);
37 nv_icmd(priv, 0x00003d, 0x00000001);
38 nv_icmd(priv, 0x0000e8, 0x00000400);
39 nv_icmd(priv, 0x0000e9, 0x00000400);
40 nv_icmd(priv, 0x0000ea, 0x00000400);
41 nv_icmd(priv, 0x0000eb, 0x00000400);
42 nv_icmd(priv, 0x0000ec, 0x00000400);
43 nv_icmd(priv, 0x0000ed, 0x00000400);
44 nv_icmd(priv, 0x0000ee, 0x00000400);
45 nv_icmd(priv, 0x0000ef, 0x00000400);
46 nv_icmd(priv, 0x000078, 0x00000300);
47 nv_icmd(priv, 0x000079, 0x00000300);
48 nv_icmd(priv, 0x00007a, 0x00000300);
49 nv_icmd(priv, 0x00007b, 0x00000300);
50 nv_icmd(priv, 0x00007c, 0x00000300);
51 nv_icmd(priv, 0x00007d, 0x00000300);
52 nv_icmd(priv, 0x00007e, 0x00000300);
53 nv_icmd(priv, 0x00007f, 0x00000300);
54 nv_icmd(priv, 0x000050, 0x00000011);
55 nv_icmd(priv, 0x000058, 0x00000008);
56 nv_icmd(priv, 0x000059, 0x00000008);
57 nv_icmd(priv, 0x00005a, 0x00000008);
58 nv_icmd(priv, 0x00005b, 0x00000008);
59 nv_icmd(priv, 0x00005c, 0x00000008);
60 nv_icmd(priv, 0x00005d, 0x00000008);
61 nv_icmd(priv, 0x00005e, 0x00000008);
62 nv_icmd(priv, 0x00005f, 0x00000008);
63 nv_icmd(priv, 0x000208, 0x00000001);
64 nv_icmd(priv, 0x000209, 0x00000001);
65 nv_icmd(priv, 0x00020a, 0x00000001);
66 nv_icmd(priv, 0x00020b, 0x00000001);
67 nv_icmd(priv, 0x00020c, 0x00000001);
68 nv_icmd(priv, 0x00020d, 0x00000001);
69 nv_icmd(priv, 0x00020e, 0x00000001);
70 nv_icmd(priv, 0x00020f, 0x00000001);
71 nv_icmd(priv, 0x000081, 0x00000001);
72 nv_icmd(priv, 0x000085, 0x00000004);
73 nv_icmd(priv, 0x000088, 0x00000400);
74 nv_icmd(priv, 0x000090, 0x00000300);
75 nv_icmd(priv, 0x000098, 0x00001001);
76 nv_icmd(priv, 0x0000e3, 0x00000001);
77 nv_icmd(priv, 0x0000da, 0x00000001);
78 nv_icmd(priv, 0x0000f8, 0x00000003);
79 nv_icmd(priv, 0x0000fa, 0x00000001);
80 nv_icmd(priv, 0x00009f, 0x0000ffff);
81 nv_icmd(priv, 0x0000a0, 0x0000ffff);
82 nv_icmd(priv, 0x0000a1, 0x0000ffff);
83 nv_icmd(priv, 0x0000a2, 0x0000ffff);
84 nv_icmd(priv, 0x0000b1, 0x00000001);
85 nv_icmd(priv, 0x0000ad, 0x0000013e);
86 nv_icmd(priv, 0x0000e1, 0x00000010);
87 nv_icmd(priv, 0x000290, 0x00000000);
88 nv_icmd(priv, 0x000291, 0x00000000);
89 nv_icmd(priv, 0x000292, 0x00000000);
90 nv_icmd(priv, 0x000293, 0x00000000);
91 nv_icmd(priv, 0x000294, 0x00000000);
92 nv_icmd(priv, 0x000295, 0x00000000);
93 nv_icmd(priv, 0x000296, 0x00000000);
94 nv_icmd(priv, 0x000297, 0x00000000);
95 nv_icmd(priv, 0x000298, 0x00000000);
96 nv_icmd(priv, 0x000299, 0x00000000);
97 nv_icmd(priv, 0x00029a, 0x00000000);
98 nv_icmd(priv, 0x00029b, 0x00000000);
99 nv_icmd(priv, 0x00029c, 0x00000000);
100 nv_icmd(priv, 0x00029d, 0x00000000);
101 nv_icmd(priv, 0x00029e, 0x00000000);
102 nv_icmd(priv, 0x00029f, 0x00000000);
103 nv_icmd(priv, 0x0003b0, 0x00000000);
104 nv_icmd(priv, 0x0003b1, 0x00000000);
105 nv_icmd(priv, 0x0003b2, 0x00000000);
106 nv_icmd(priv, 0x0003b3, 0x00000000);
107 nv_icmd(priv, 0x0003b4, 0x00000000);
108 nv_icmd(priv, 0x0003b5, 0x00000000);
109 nv_icmd(priv, 0x0003b6, 0x00000000);
110 nv_icmd(priv, 0x0003b7, 0x00000000);
111 nv_icmd(priv, 0x0003b8, 0x00000000);
112 nv_icmd(priv, 0x0003b9, 0x00000000);
113 nv_icmd(priv, 0x0003ba, 0x00000000);
114 nv_icmd(priv, 0x0003bb, 0x00000000);
115 nv_icmd(priv, 0x0003bc, 0x00000000);
116 nv_icmd(priv, 0x0003bd, 0x00000000);
117 nv_icmd(priv, 0x0003be, 0x00000000);
118 nv_icmd(priv, 0x0003bf, 0x00000000);
119 nv_icmd(priv, 0x0002a0, 0x00000000);
120 nv_icmd(priv, 0x0002a1, 0x00000000);
121 nv_icmd(priv, 0x0002a2, 0x00000000);
122 nv_icmd(priv, 0x0002a3, 0x00000000);
123 nv_icmd(priv, 0x0002a4, 0x00000000);
124 nv_icmd(priv, 0x0002a5, 0x00000000);
125 nv_icmd(priv, 0x0002a6, 0x00000000);
126 nv_icmd(priv, 0x0002a7, 0x00000000);
127 nv_icmd(priv, 0x0002a8, 0x00000000);
128 nv_icmd(priv, 0x0002a9, 0x00000000);
129 nv_icmd(priv, 0x0002aa, 0x00000000);
130 nv_icmd(priv, 0x0002ab, 0x00000000);
131 nv_icmd(priv, 0x0002ac, 0x00000000);
132 nv_icmd(priv, 0x0002ad, 0x00000000);
133 nv_icmd(priv, 0x0002ae, 0x00000000);
134 nv_icmd(priv, 0x0002af, 0x00000000);
135 nv_icmd(priv, 0x000420, 0x00000000);
136 nv_icmd(priv, 0x000421, 0x00000000);
137 nv_icmd(priv, 0x000422, 0x00000000);
138 nv_icmd(priv, 0x000423, 0x00000000);
139 nv_icmd(priv, 0x000424, 0x00000000);
140 nv_icmd(priv, 0x000425, 0x00000000);
141 nv_icmd(priv, 0x000426, 0x00000000);
142 nv_icmd(priv, 0x000427, 0x00000000);
143 nv_icmd(priv, 0x000428, 0x00000000);
144 nv_icmd(priv, 0x000429, 0x00000000);
145 nv_icmd(priv, 0x00042a, 0x00000000);
146 nv_icmd(priv, 0x00042b, 0x00000000);
147 nv_icmd(priv, 0x00042c, 0x00000000);
148 nv_icmd(priv, 0x00042d, 0x00000000);
149 nv_icmd(priv, 0x00042e, 0x00000000);
150 nv_icmd(priv, 0x00042f, 0x00000000);
151 nv_icmd(priv, 0x0002b0, 0x00000000);
152 nv_icmd(priv, 0x0002b1, 0x00000000);
153 nv_icmd(priv, 0x0002b2, 0x00000000);
154 nv_icmd(priv, 0x0002b3, 0x00000000);
155 nv_icmd(priv, 0x0002b4, 0x00000000);
156 nv_icmd(priv, 0x0002b5, 0x00000000);
157 nv_icmd(priv, 0x0002b6, 0x00000000);
158 nv_icmd(priv, 0x0002b7, 0x00000000);
159 nv_icmd(priv, 0x0002b8, 0x00000000);
160 nv_icmd(priv, 0x0002b9, 0x00000000);
161 nv_icmd(priv, 0x0002ba, 0x00000000);
162 nv_icmd(priv, 0x0002bb, 0x00000000);
163 nv_icmd(priv, 0x0002bc, 0x00000000);
164 nv_icmd(priv, 0x0002bd, 0x00000000);
165 nv_icmd(priv, 0x0002be, 0x00000000);
166 nv_icmd(priv, 0x0002bf, 0x00000000);
167 nv_icmd(priv, 0x000430, 0x00000000);
168 nv_icmd(priv, 0x000431, 0x00000000);
169 nv_icmd(priv, 0x000432, 0x00000000);
170 nv_icmd(priv, 0x000433, 0x00000000);
171 nv_icmd(priv, 0x000434, 0x00000000);
172 nv_icmd(priv, 0x000435, 0x00000000);
173 nv_icmd(priv, 0x000436, 0x00000000);
174 nv_icmd(priv, 0x000437, 0x00000000);
175 nv_icmd(priv, 0x000438, 0x00000000);
176 nv_icmd(priv, 0x000439, 0x00000000);
177 nv_icmd(priv, 0x00043a, 0x00000000);
178 nv_icmd(priv, 0x00043b, 0x00000000);
179 nv_icmd(priv, 0x00043c, 0x00000000);
180 nv_icmd(priv, 0x00043d, 0x00000000);
181 nv_icmd(priv, 0x00043e, 0x00000000);
182 nv_icmd(priv, 0x00043f, 0x00000000);
183 nv_icmd(priv, 0x0002c0, 0x00000000);
184 nv_icmd(priv, 0x0002c1, 0x00000000);
185 nv_icmd(priv, 0x0002c2, 0x00000000);
186 nv_icmd(priv, 0x0002c3, 0x00000000);
187 nv_icmd(priv, 0x0002c4, 0x00000000);
188 nv_icmd(priv, 0x0002c5, 0x00000000);
189 nv_icmd(priv, 0x0002c6, 0x00000000);
190 nv_icmd(priv, 0x0002c7, 0x00000000);
191 nv_icmd(priv, 0x0002c8, 0x00000000);
192 nv_icmd(priv, 0x0002c9, 0x00000000);
193 nv_icmd(priv, 0x0002ca, 0x00000000);
194 nv_icmd(priv, 0x0002cb, 0x00000000);
195 nv_icmd(priv, 0x0002cc, 0x00000000);
196 nv_icmd(priv, 0x0002cd, 0x00000000);
197 nv_icmd(priv, 0x0002ce, 0x00000000);
198 nv_icmd(priv, 0x0002cf, 0x00000000);
199 nv_icmd(priv, 0x0004d0, 0x00000000);
200 nv_icmd(priv, 0x0004d1, 0x00000000);
201 nv_icmd(priv, 0x0004d2, 0x00000000);
202 nv_icmd(priv, 0x0004d3, 0x00000000);
203 nv_icmd(priv, 0x0004d4, 0x00000000);
204 nv_icmd(priv, 0x0004d5, 0x00000000);
205 nv_icmd(priv, 0x0004d6, 0x00000000);
206 nv_icmd(priv, 0x0004d7, 0x00000000);
207 nv_icmd(priv, 0x0004d8, 0x00000000);
208 nv_icmd(priv, 0x0004d9, 0x00000000);
209 nv_icmd(priv, 0x0004da, 0x00000000);
210 nv_icmd(priv, 0x0004db, 0x00000000);
211 nv_icmd(priv, 0x0004dc, 0x00000000);
212 nv_icmd(priv, 0x0004dd, 0x00000000);
213 nv_icmd(priv, 0x0004de, 0x00000000);
214 nv_icmd(priv, 0x0004df, 0x00000000);
215 nv_icmd(priv, 0x000720, 0x00000000);
216 nv_icmd(priv, 0x000721, 0x00000000);
217 nv_icmd(priv, 0x000722, 0x00000000);
218 nv_icmd(priv, 0x000723, 0x00000000);
219 nv_icmd(priv, 0x000724, 0x00000000);
220 nv_icmd(priv, 0x000725, 0x00000000);
221 nv_icmd(priv, 0x000726, 0x00000000);
222 nv_icmd(priv, 0x000727, 0x00000000);
223 nv_icmd(priv, 0x000728, 0x00000000);
224 nv_icmd(priv, 0x000729, 0x00000000);
225 nv_icmd(priv, 0x00072a, 0x00000000);
226 nv_icmd(priv, 0x00072b, 0x00000000);
227 nv_icmd(priv, 0x00072c, 0x00000000);
228 nv_icmd(priv, 0x00072d, 0x00000000);
229 nv_icmd(priv, 0x00072e, 0x00000000);
230 nv_icmd(priv, 0x00072f, 0x00000000);
231 nv_icmd(priv, 0x0008c0, 0x00000000);
232 nv_icmd(priv, 0x0008c1, 0x00000000);
233 nv_icmd(priv, 0x0008c2, 0x00000000);
234 nv_icmd(priv, 0x0008c3, 0x00000000);
235 nv_icmd(priv, 0x0008c4, 0x00000000);
236 nv_icmd(priv, 0x0008c5, 0x00000000);
237 nv_icmd(priv, 0x0008c6, 0x00000000);
238 nv_icmd(priv, 0x0008c7, 0x00000000);
239 nv_icmd(priv, 0x0008c8, 0x00000000);
240 nv_icmd(priv, 0x0008c9, 0x00000000);
241 nv_icmd(priv, 0x0008ca, 0x00000000);
242 nv_icmd(priv, 0x0008cb, 0x00000000);
243 nv_icmd(priv, 0x0008cc, 0x00000000);
244 nv_icmd(priv, 0x0008cd, 0x00000000);
245 nv_icmd(priv, 0x0008ce, 0x00000000);
246 nv_icmd(priv, 0x0008cf, 0x00000000);
247 nv_icmd(priv, 0x000890, 0x00000000);
248 nv_icmd(priv, 0x000891, 0x00000000);
249 nv_icmd(priv, 0x000892, 0x00000000);
250 nv_icmd(priv, 0x000893, 0x00000000);
251 nv_icmd(priv, 0x000894, 0x00000000);
252 nv_icmd(priv, 0x000895, 0x00000000);
253 nv_icmd(priv, 0x000896, 0x00000000);
254 nv_icmd(priv, 0x000897, 0x00000000);
255 nv_icmd(priv, 0x000898, 0x00000000);
256 nv_icmd(priv, 0x000899, 0x00000000);
257 nv_icmd(priv, 0x00089a, 0x00000000);
258 nv_icmd(priv, 0x00089b, 0x00000000);
259 nv_icmd(priv, 0x00089c, 0x00000000);
260 nv_icmd(priv, 0x00089d, 0x00000000);
261 nv_icmd(priv, 0x00089e, 0x00000000);
262 nv_icmd(priv, 0x00089f, 0x00000000);
263 nv_icmd(priv, 0x0008e0, 0x00000000);
264 nv_icmd(priv, 0x0008e1, 0x00000000);
265 nv_icmd(priv, 0x0008e2, 0x00000000);
266 nv_icmd(priv, 0x0008e3, 0x00000000);
267 nv_icmd(priv, 0x0008e4, 0x00000000);
268 nv_icmd(priv, 0x0008e5, 0x00000000);
269 nv_icmd(priv, 0x0008e6, 0x00000000);
270 nv_icmd(priv, 0x0008e7, 0x00000000);
271 nv_icmd(priv, 0x0008e8, 0x00000000);
272 nv_icmd(priv, 0x0008e9, 0x00000000);
273 nv_icmd(priv, 0x0008ea, 0x00000000);
274 nv_icmd(priv, 0x0008eb, 0x00000000);
275 nv_icmd(priv, 0x0008ec, 0x00000000);
276 nv_icmd(priv, 0x0008ed, 0x00000000);
277 nv_icmd(priv, 0x0008ee, 0x00000000);
278 nv_icmd(priv, 0x0008ef, 0x00000000);
279 nv_icmd(priv, 0x0008a0, 0x00000000);
280 nv_icmd(priv, 0x0008a1, 0x00000000);
281 nv_icmd(priv, 0x0008a2, 0x00000000);
282 nv_icmd(priv, 0x0008a3, 0x00000000);
283 nv_icmd(priv, 0x0008a4, 0x00000000);
284 nv_icmd(priv, 0x0008a5, 0x00000000);
285 nv_icmd(priv, 0x0008a6, 0x00000000);
286 nv_icmd(priv, 0x0008a7, 0x00000000);
287 nv_icmd(priv, 0x0008a8, 0x00000000);
288 nv_icmd(priv, 0x0008a9, 0x00000000);
289 nv_icmd(priv, 0x0008aa, 0x00000000);
290 nv_icmd(priv, 0x0008ab, 0x00000000);
291 nv_icmd(priv, 0x0008ac, 0x00000000);
292 nv_icmd(priv, 0x0008ad, 0x00000000);
293 nv_icmd(priv, 0x0008ae, 0x00000000);
294 nv_icmd(priv, 0x0008af, 0x00000000);
295 nv_icmd(priv, 0x0008f0, 0x00000000);
296 nv_icmd(priv, 0x0008f1, 0x00000000);
297 nv_icmd(priv, 0x0008f2, 0x00000000);
298 nv_icmd(priv, 0x0008f3, 0x00000000);
299 nv_icmd(priv, 0x0008f4, 0x00000000);
300 nv_icmd(priv, 0x0008f5, 0x00000000);
301 nv_icmd(priv, 0x0008f6, 0x00000000);
302 nv_icmd(priv, 0x0008f7, 0x00000000);
303 nv_icmd(priv, 0x0008f8, 0x00000000);
304 nv_icmd(priv, 0x0008f9, 0x00000000);
305 nv_icmd(priv, 0x0008fa, 0x00000000);
306 nv_icmd(priv, 0x0008fb, 0x00000000);
307 nv_icmd(priv, 0x0008fc, 0x00000000);
308 nv_icmd(priv, 0x0008fd, 0x00000000);
309 nv_icmd(priv, 0x0008fe, 0x00000000);
310 nv_icmd(priv, 0x0008ff, 0x00000000);
311 nv_icmd(priv, 0x00094c, 0x000000ff);
312 nv_icmd(priv, 0x00094d, 0xffffffff);
313 nv_icmd(priv, 0x00094e, 0x00000002);
314 nv_icmd(priv, 0x0002ec, 0x00000001);
315 nv_icmd(priv, 0x000303, 0x00000001);
316 nv_icmd(priv, 0x0002e6, 0x00000001);
317 nv_icmd(priv, 0x000466, 0x00000052);
318 nv_icmd(priv, 0x000301, 0x3f800000);
319 nv_icmd(priv, 0x000304, 0x30201000);
320 nv_icmd(priv, 0x000305, 0x70605040);
321 nv_icmd(priv, 0x000306, 0xb8a89888);
322 nv_icmd(priv, 0x000307, 0xf8e8d8c8);
323 nv_icmd(priv, 0x00030a, 0x00ffff00);
324 nv_icmd(priv, 0x00030b, 0x0000001a);
325 nv_icmd(priv, 0x00030c, 0x00000001);
326 nv_icmd(priv, 0x000318, 0x00000001);
327 nv_icmd(priv, 0x000340, 0x00000000);
328 nv_icmd(priv, 0x000375, 0x00000001);
329 nv_icmd(priv, 0x00037d, 0x00000006);
330 nv_icmd(priv, 0x0003a0, 0x00000002);
331 nv_icmd(priv, 0x0003aa, 0x00000001);
332 nv_icmd(priv, 0x0003a9, 0x00000001);
333 nv_icmd(priv, 0x000380, 0x00000001);
334 nv_icmd(priv, 0x000383, 0x00000011);
335 nv_icmd(priv, 0x000360, 0x00000040);
336 nv_icmd(priv, 0x000366, 0x00000000);
337 nv_icmd(priv, 0x000367, 0x00000000);
338 nv_icmd(priv, 0x000368, 0x00000fff);
339 nv_icmd(priv, 0x000370, 0x00000000);
340 nv_icmd(priv, 0x000371, 0x00000000);
341 nv_icmd(priv, 0x000372, 0x000fffff);
342 nv_icmd(priv, 0x00037a, 0x00000012);
343 nv_icmd(priv, 0x000619, 0x00000003);
344 nv_icmd(priv, 0x000811, 0x00000003);
345 nv_icmd(priv, 0x000812, 0x00000004);
346 nv_icmd(priv, 0x000813, 0x00000006);
347 nv_icmd(priv, 0x000814, 0x00000008);
348 nv_icmd(priv, 0x000815, 0x0000000b);
349 nv_icmd(priv, 0x000800, 0x00000001);
350 nv_icmd(priv, 0x000801, 0x00000001);
351 nv_icmd(priv, 0x000802, 0x00000001);
352 nv_icmd(priv, 0x000803, 0x00000001);
353 nv_icmd(priv, 0x000804, 0x00000001);
354 nv_icmd(priv, 0x000805, 0x00000001);
355 nv_icmd(priv, 0x000632, 0x00000001);
356 nv_icmd(priv, 0x000633, 0x00000002);
357 nv_icmd(priv, 0x000634, 0x00000003);
358 nv_icmd(priv, 0x000635, 0x00000004);
359 nv_icmd(priv, 0x000654, 0x3f800000);
360 nv_icmd(priv, 0x000657, 0x3f800000);
361 nv_icmd(priv, 0x000655, 0x3f800000);
362 nv_icmd(priv, 0x000656, 0x3f800000);
363 nv_icmd(priv, 0x0006cd, 0x3f800000);
364 nv_icmd(priv, 0x0007f5, 0x3f800000);
365 nv_icmd(priv, 0x0007dc, 0x39291909);
366 nv_icmd(priv, 0x0007dd, 0x79695949);
367 nv_icmd(priv, 0x0007de, 0xb9a99989);
368 nv_icmd(priv, 0x0007df, 0xf9e9d9c9);
369 nv_icmd(priv, 0x0007e8, 0x00003210);
370 nv_icmd(priv, 0x0007e9, 0x00007654);
371 nv_icmd(priv, 0x0007ea, 0x00000098);
372 nv_icmd(priv, 0x0007ec, 0x39291909);
373 nv_icmd(priv, 0x0007ed, 0x79695949);
374 nv_icmd(priv, 0x0007ee, 0xb9a99989);
375 nv_icmd(priv, 0x0007ef, 0xf9e9d9c9);
376 nv_icmd(priv, 0x0007f0, 0x00003210);
377 nv_icmd(priv, 0x0007f1, 0x00007654);
378 nv_icmd(priv, 0x0007f2, 0x00000098);
379 nv_icmd(priv, 0x0005a5, 0x00000001);
380 nv_icmd(priv, 0x000980, 0x00000000);
381 nv_icmd(priv, 0x000981, 0x00000000);
382 nv_icmd(priv, 0x000982, 0x00000000);
383 nv_icmd(priv, 0x000983, 0x00000000);
384 nv_icmd(priv, 0x000984, 0x00000000);
385 nv_icmd(priv, 0x000985, 0x00000000);
386 nv_icmd(priv, 0x000986, 0x00000000);
387 nv_icmd(priv, 0x000987, 0x00000000);
388 nv_icmd(priv, 0x000988, 0x00000000);
389 nv_icmd(priv, 0x000989, 0x00000000);
390 nv_icmd(priv, 0x00098a, 0x00000000);
391 nv_icmd(priv, 0x00098b, 0x00000000);
392 nv_icmd(priv, 0x00098c, 0x00000000);
393 nv_icmd(priv, 0x00098d, 0x00000000);
394 nv_icmd(priv, 0x00098e, 0x00000000);
395 nv_icmd(priv, 0x00098f, 0x00000000);
396 nv_icmd(priv, 0x000990, 0x00000000);
397 nv_icmd(priv, 0x000991, 0x00000000);
398 nv_icmd(priv, 0x000992, 0x00000000);
399 nv_icmd(priv, 0x000993, 0x00000000);
400 nv_icmd(priv, 0x000994, 0x00000000);
401 nv_icmd(priv, 0x000995, 0x00000000);
402 nv_icmd(priv, 0x000996, 0x00000000);
403 nv_icmd(priv, 0x000997, 0x00000000);
404 nv_icmd(priv, 0x000998, 0x00000000);
405 nv_icmd(priv, 0x000999, 0x00000000);
406 nv_icmd(priv, 0x00099a, 0x00000000);
407 nv_icmd(priv, 0x00099b, 0x00000000);
408 nv_icmd(priv, 0x00099c, 0x00000000);
409 nv_icmd(priv, 0x00099d, 0x00000000);
410 nv_icmd(priv, 0x00099e, 0x00000000);
411 nv_icmd(priv, 0x00099f, 0x00000000);
412 nv_icmd(priv, 0x0009a0, 0x00000000);
413 nv_icmd(priv, 0x0009a1, 0x00000000);
414 nv_icmd(priv, 0x0009a2, 0x00000000);
415 nv_icmd(priv, 0x0009a3, 0x00000000);
416 nv_icmd(priv, 0x0009a4, 0x00000000);
417 nv_icmd(priv, 0x0009a5, 0x00000000);
418 nv_icmd(priv, 0x0009a6, 0x00000000);
419 nv_icmd(priv, 0x0009a7, 0x00000000);
420 nv_icmd(priv, 0x0009a8, 0x00000000);
421 nv_icmd(priv, 0x0009a9, 0x00000000);
422 nv_icmd(priv, 0x0009aa, 0x00000000);
423 nv_icmd(priv, 0x0009ab, 0x00000000);
424 nv_icmd(priv, 0x0009ac, 0x00000000);
425 nv_icmd(priv, 0x0009ad, 0x00000000);
426 nv_icmd(priv, 0x0009ae, 0x00000000);
427 nv_icmd(priv, 0x0009af, 0x00000000);
428 nv_icmd(priv, 0x0009b0, 0x00000000);
429 nv_icmd(priv, 0x0009b1, 0x00000000);
430 nv_icmd(priv, 0x0009b2, 0x00000000);
431 nv_icmd(priv, 0x0009b3, 0x00000000);
432 nv_icmd(priv, 0x0009b4, 0x00000000);
433 nv_icmd(priv, 0x0009b5, 0x00000000);
434 nv_icmd(priv, 0x0009b6, 0x00000000);
435 nv_icmd(priv, 0x0009b7, 0x00000000);
436 nv_icmd(priv, 0x0009b8, 0x00000000);
437 nv_icmd(priv, 0x0009b9, 0x00000000);
438 nv_icmd(priv, 0x0009ba, 0x00000000);
439 nv_icmd(priv, 0x0009bb, 0x00000000);
440 nv_icmd(priv, 0x0009bc, 0x00000000);
441 nv_icmd(priv, 0x0009bd, 0x00000000);
442 nv_icmd(priv, 0x0009be, 0x00000000);
443 nv_icmd(priv, 0x0009bf, 0x00000000);
444 nv_icmd(priv, 0x0009c0, 0x00000000);
445 nv_icmd(priv, 0x0009c1, 0x00000000);
446 nv_icmd(priv, 0x0009c2, 0x00000000);
447 nv_icmd(priv, 0x0009c3, 0x00000000);
448 nv_icmd(priv, 0x0009c4, 0x00000000);
449 nv_icmd(priv, 0x0009c5, 0x00000000);
450 nv_icmd(priv, 0x0009c6, 0x00000000);
451 nv_icmd(priv, 0x0009c7, 0x00000000);
452 nv_icmd(priv, 0x0009c8, 0x00000000);
453 nv_icmd(priv, 0x0009c9, 0x00000000);
454 nv_icmd(priv, 0x0009ca, 0x00000000);
455 nv_icmd(priv, 0x0009cb, 0x00000000);
456 nv_icmd(priv, 0x0009cc, 0x00000000);
457 nv_icmd(priv, 0x0009cd, 0x00000000);
458 nv_icmd(priv, 0x0009ce, 0x00000000);
459 nv_icmd(priv, 0x0009cf, 0x00000000);
460 nv_icmd(priv, 0x0009d0, 0x00000000);
461 nv_icmd(priv, 0x0009d1, 0x00000000);
462 nv_icmd(priv, 0x0009d2, 0x00000000);
463 nv_icmd(priv, 0x0009d3, 0x00000000);
464 nv_icmd(priv, 0x0009d4, 0x00000000);
465 nv_icmd(priv, 0x0009d5, 0x00000000);
466 nv_icmd(priv, 0x0009d6, 0x00000000);
467 nv_icmd(priv, 0x0009d7, 0x00000000);
468 nv_icmd(priv, 0x0009d8, 0x00000000);
469 nv_icmd(priv, 0x0009d9, 0x00000000);
470 nv_icmd(priv, 0x0009da, 0x00000000);
471 nv_icmd(priv, 0x0009db, 0x00000000);
472 nv_icmd(priv, 0x0009dc, 0x00000000);
473 nv_icmd(priv, 0x0009dd, 0x00000000);
474 nv_icmd(priv, 0x0009de, 0x00000000);
475 nv_icmd(priv, 0x0009df, 0x00000000);
476 nv_icmd(priv, 0x0009e0, 0x00000000);
477 nv_icmd(priv, 0x0009e1, 0x00000000);
478 nv_icmd(priv, 0x0009e2, 0x00000000);
479 nv_icmd(priv, 0x0009e3, 0x00000000);
480 nv_icmd(priv, 0x0009e4, 0x00000000);
481 nv_icmd(priv, 0x0009e5, 0x00000000);
482 nv_icmd(priv, 0x0009e6, 0x00000000);
483 nv_icmd(priv, 0x0009e7, 0x00000000);
484 nv_icmd(priv, 0x0009e8, 0x00000000);
485 nv_icmd(priv, 0x0009e9, 0x00000000);
486 nv_icmd(priv, 0x0009ea, 0x00000000);
487 nv_icmd(priv, 0x0009eb, 0x00000000);
488 nv_icmd(priv, 0x0009ec, 0x00000000);
489 nv_icmd(priv, 0x0009ed, 0x00000000);
490 nv_icmd(priv, 0x0009ee, 0x00000000);
491 nv_icmd(priv, 0x0009ef, 0x00000000);
492 nv_icmd(priv, 0x0009f0, 0x00000000);
493 nv_icmd(priv, 0x0009f1, 0x00000000);
494 nv_icmd(priv, 0x0009f2, 0x00000000);
495 nv_icmd(priv, 0x0009f3, 0x00000000);
496 nv_icmd(priv, 0x0009f4, 0x00000000);
497 nv_icmd(priv, 0x0009f5, 0x00000000);
498 nv_icmd(priv, 0x0009f6, 0x00000000);
499 nv_icmd(priv, 0x0009f7, 0x00000000);
500 nv_icmd(priv, 0x0009f8, 0x00000000);
501 nv_icmd(priv, 0x0009f9, 0x00000000);
502 nv_icmd(priv, 0x0009fa, 0x00000000);
503 nv_icmd(priv, 0x0009fb, 0x00000000);
504 nv_icmd(priv, 0x0009fc, 0x00000000);
505 nv_icmd(priv, 0x0009fd, 0x00000000);
506 nv_icmd(priv, 0x0009fe, 0x00000000);
507 nv_icmd(priv, 0x0009ff, 0x00000000);
508 nv_icmd(priv, 0x000468, 0x00000004);
509 nv_icmd(priv, 0x00046c, 0x00000001);
510 nv_icmd(priv, 0x000470, 0x00000000);
511 nv_icmd(priv, 0x000471, 0x00000000);
512 nv_icmd(priv, 0x000472, 0x00000000);
513 nv_icmd(priv, 0x000473, 0x00000000);
514 nv_icmd(priv, 0x000474, 0x00000000);
515 nv_icmd(priv, 0x000475, 0x00000000);
516 nv_icmd(priv, 0x000476, 0x00000000);
517 nv_icmd(priv, 0x000477, 0x00000000);
518 nv_icmd(priv, 0x000478, 0x00000000);
519 nv_icmd(priv, 0x000479, 0x00000000);
520 nv_icmd(priv, 0x00047a, 0x00000000);
521 nv_icmd(priv, 0x00047b, 0x00000000);
522 nv_icmd(priv, 0x00047c, 0x00000000);
523 nv_icmd(priv, 0x00047d, 0x00000000);
524 nv_icmd(priv, 0x00047e, 0x00000000);
525 nv_icmd(priv, 0x00047f, 0x00000000);
526 nv_icmd(priv, 0x000480, 0x00000000);
527 nv_icmd(priv, 0x000481, 0x00000000);
528 nv_icmd(priv, 0x000482, 0x00000000);
529 nv_icmd(priv, 0x000483, 0x00000000);
530 nv_icmd(priv, 0x000484, 0x00000000);
531 nv_icmd(priv, 0x000485, 0x00000000);
532 nv_icmd(priv, 0x000486, 0x00000000);
533 nv_icmd(priv, 0x000487, 0x00000000);
534 nv_icmd(priv, 0x000488, 0x00000000);
535 nv_icmd(priv, 0x000489, 0x00000000);
536 nv_icmd(priv, 0x00048a, 0x00000000);
537 nv_icmd(priv, 0x00048b, 0x00000000);
538 nv_icmd(priv, 0x00048c, 0x00000000);
539 nv_icmd(priv, 0x00048d, 0x00000000);
540 nv_icmd(priv, 0x00048e, 0x00000000);
541 nv_icmd(priv, 0x00048f, 0x00000000);
542 nv_icmd(priv, 0x000490, 0x00000000);
543 nv_icmd(priv, 0x000491, 0x00000000);
544 nv_icmd(priv, 0x000492, 0x00000000);
545 nv_icmd(priv, 0x000493, 0x00000000);
546 nv_icmd(priv, 0x000494, 0x00000000);
547 nv_icmd(priv, 0x000495, 0x00000000);
548 nv_icmd(priv, 0x000496, 0x00000000);
549 nv_icmd(priv, 0x000497, 0x00000000);
550 nv_icmd(priv, 0x000498, 0x00000000);
551 nv_icmd(priv, 0x000499, 0x00000000);
552 nv_icmd(priv, 0x00049a, 0x00000000);
553 nv_icmd(priv, 0x00049b, 0x00000000);
554 nv_icmd(priv, 0x00049c, 0x00000000);
555 nv_icmd(priv, 0x00049d, 0x00000000);
556 nv_icmd(priv, 0x00049e, 0x00000000);
557 nv_icmd(priv, 0x00049f, 0x00000000);
558 nv_icmd(priv, 0x0004a0, 0x00000000);
559 nv_icmd(priv, 0x0004a1, 0x00000000);
560 nv_icmd(priv, 0x0004a2, 0x00000000);
561 nv_icmd(priv, 0x0004a3, 0x00000000);
562 nv_icmd(priv, 0x0004a4, 0x00000000);
563 nv_icmd(priv, 0x0004a5, 0x00000000);
564 nv_icmd(priv, 0x0004a6, 0x00000000);
565 nv_icmd(priv, 0x0004a7, 0x00000000);
566 nv_icmd(priv, 0x0004a8, 0x00000000);
567 nv_icmd(priv, 0x0004a9, 0x00000000);
568 nv_icmd(priv, 0x0004aa, 0x00000000);
569 nv_icmd(priv, 0x0004ab, 0x00000000);
570 nv_icmd(priv, 0x0004ac, 0x00000000);
571 nv_icmd(priv, 0x0004ad, 0x00000000);
572 nv_icmd(priv, 0x0004ae, 0x00000000);
573 nv_icmd(priv, 0x0004af, 0x00000000);
574 nv_icmd(priv, 0x0004b0, 0x00000000);
575 nv_icmd(priv, 0x0004b1, 0x00000000);
576 nv_icmd(priv, 0x0004b2, 0x00000000);
577 nv_icmd(priv, 0x0004b3, 0x00000000);
578 nv_icmd(priv, 0x0004b4, 0x00000000);
579 nv_icmd(priv, 0x0004b5, 0x00000000);
580 nv_icmd(priv, 0x0004b6, 0x00000000);
581 nv_icmd(priv, 0x0004b7, 0x00000000);
582 nv_icmd(priv, 0x0004b8, 0x00000000);
583 nv_icmd(priv, 0x0004b9, 0x00000000);
584 nv_icmd(priv, 0x0004ba, 0x00000000);
585 nv_icmd(priv, 0x0004bb, 0x00000000);
586 nv_icmd(priv, 0x0004bc, 0x00000000);
587 nv_icmd(priv, 0x0004bd, 0x00000000);
588 nv_icmd(priv, 0x0004be, 0x00000000);
589 nv_icmd(priv, 0x0004bf, 0x00000000);
590 nv_icmd(priv, 0x0004c0, 0x00000000);
591 nv_icmd(priv, 0x0004c1, 0x00000000);
592 nv_icmd(priv, 0x0004c2, 0x00000000);
593 nv_icmd(priv, 0x0004c3, 0x00000000);
594 nv_icmd(priv, 0x0004c4, 0x00000000);
595 nv_icmd(priv, 0x0004c5, 0x00000000);
596 nv_icmd(priv, 0x0004c6, 0x00000000);
597 nv_icmd(priv, 0x0004c7, 0x00000000);
598 nv_icmd(priv, 0x0004c8, 0x00000000);
599 nv_icmd(priv, 0x0004c9, 0x00000000);
600 nv_icmd(priv, 0x0004ca, 0x00000000);
601 nv_icmd(priv, 0x0004cb, 0x00000000);
602 nv_icmd(priv, 0x0004cc, 0x00000000);
603 nv_icmd(priv, 0x0004cd, 0x00000000);
604 nv_icmd(priv, 0x0004ce, 0x00000000);
605 nv_icmd(priv, 0x0004cf, 0x00000000);
606 nv_icmd(priv, 0x000510, 0x3f800000);
607 nv_icmd(priv, 0x000511, 0x3f800000);
608 nv_icmd(priv, 0x000512, 0x3f800000);
609 nv_icmd(priv, 0x000513, 0x3f800000);
610 nv_icmd(priv, 0x000514, 0x3f800000);
611 nv_icmd(priv, 0x000515, 0x3f800000);
612 nv_icmd(priv, 0x000516, 0x3f800000);
613 nv_icmd(priv, 0x000517, 0x3f800000);
614 nv_icmd(priv, 0x000518, 0x3f800000);
615 nv_icmd(priv, 0x000519, 0x3f800000);
616 nv_icmd(priv, 0x00051a, 0x3f800000);
617 nv_icmd(priv, 0x00051b, 0x3f800000);
618 nv_icmd(priv, 0x00051c, 0x3f800000);
619 nv_icmd(priv, 0x00051d, 0x3f800000);
620 nv_icmd(priv, 0x00051e, 0x3f800000);
621 nv_icmd(priv, 0x00051f, 0x3f800000);
622 nv_icmd(priv, 0x000520, 0x000002b6);
623 nv_icmd(priv, 0x000529, 0x00000001);
624 nv_icmd(priv, 0x000530, 0xffff0000);
625 nv_icmd(priv, 0x000531, 0xffff0000);
626 nv_icmd(priv, 0x000532, 0xffff0000);
627 nv_icmd(priv, 0x000533, 0xffff0000);
628 nv_icmd(priv, 0x000534, 0xffff0000);
629 nv_icmd(priv, 0x000535, 0xffff0000);
630 nv_icmd(priv, 0x000536, 0xffff0000);
631 nv_icmd(priv, 0x000537, 0xffff0000);
632 nv_icmd(priv, 0x000538, 0xffff0000);
633 nv_icmd(priv, 0x000539, 0xffff0000);
634 nv_icmd(priv, 0x00053a, 0xffff0000);
635 nv_icmd(priv, 0x00053b, 0xffff0000);
636 nv_icmd(priv, 0x00053c, 0xffff0000);
637 nv_icmd(priv, 0x00053d, 0xffff0000);
638 nv_icmd(priv, 0x00053e, 0xffff0000);
639 nv_icmd(priv, 0x00053f, 0xffff0000);
640 nv_icmd(priv, 0x000585, 0x0000003f);
641 nv_icmd(priv, 0x000576, 0x00000003);
642 nv_icmd(priv, 0x00057b, 0x00000059);
643 nv_icmd(priv, 0x000586, 0x00000040);
644 nv_icmd(priv, 0x000582, 0x00000080);
645 nv_icmd(priv, 0x000583, 0x00000080);
646 nv_icmd(priv, 0x0005c2, 0x00000001);
647 nv_icmd(priv, 0x000638, 0x00000001);
648 nv_icmd(priv, 0x000639, 0x00000001);
649 nv_icmd(priv, 0x00063a, 0x00000002);
650 nv_icmd(priv, 0x00063b, 0x00000001);
651 nv_icmd(priv, 0x00063c, 0x00000001);
652 nv_icmd(priv, 0x00063d, 0x00000002);
653 nv_icmd(priv, 0x00063e, 0x00000001);
654 nv_icmd(priv, 0x0008b8, 0x00000001);
655 nv_icmd(priv, 0x0008b9, 0x00000001);
656 nv_icmd(priv, 0x0008ba, 0x00000001);
657 nv_icmd(priv, 0x0008bb, 0x00000001);
658 nv_icmd(priv, 0x0008bc, 0x00000001);
659 nv_icmd(priv, 0x0008bd, 0x00000001);
660 nv_icmd(priv, 0x0008be, 0x00000001);
661 nv_icmd(priv, 0x0008bf, 0x00000001);
662 nv_icmd(priv, 0x000900, 0x00000001);
663 nv_icmd(priv, 0x000901, 0x00000001);
664 nv_icmd(priv, 0x000902, 0x00000001);
665 nv_icmd(priv, 0x000903, 0x00000001);
666 nv_icmd(priv, 0x000904, 0x00000001);
667 nv_icmd(priv, 0x000905, 0x00000001);
668 nv_icmd(priv, 0x000906, 0x00000001);
669 nv_icmd(priv, 0x000907, 0x00000001);
670 nv_icmd(priv, 0x000908, 0x00000002);
671 nv_icmd(priv, 0x000909, 0x00000002);
672 nv_icmd(priv, 0x00090a, 0x00000002);
673 nv_icmd(priv, 0x00090b, 0x00000002);
674 nv_icmd(priv, 0x00090c, 0x00000002);
675 nv_icmd(priv, 0x00090d, 0x00000002);
676 nv_icmd(priv, 0x00090e, 0x00000002);
677 nv_icmd(priv, 0x00090f, 0x00000002);
678 nv_icmd(priv, 0x000910, 0x00000001);
679 nv_icmd(priv, 0x000911, 0x00000001);
680 nv_icmd(priv, 0x000912, 0x00000001);
681 nv_icmd(priv, 0x000913, 0x00000001);
682 nv_icmd(priv, 0x000914, 0x00000001);
683 nv_icmd(priv, 0x000915, 0x00000001);
684 nv_icmd(priv, 0x000916, 0x00000001);
685 nv_icmd(priv, 0x000917, 0x00000001);
686 nv_icmd(priv, 0x000918, 0x00000001);
687 nv_icmd(priv, 0x000919, 0x00000001);
688 nv_icmd(priv, 0x00091a, 0x00000001);
689 nv_icmd(priv, 0x00091b, 0x00000001);
690 nv_icmd(priv, 0x00091c, 0x00000001);
691 nv_icmd(priv, 0x00091d, 0x00000001);
692 nv_icmd(priv, 0x00091e, 0x00000001);
693 nv_icmd(priv, 0x00091f, 0x00000001);
694 nv_icmd(priv, 0x000920, 0x00000002);
695 nv_icmd(priv, 0x000921, 0x00000002);
696 nv_icmd(priv, 0x000922, 0x00000002);
697 nv_icmd(priv, 0x000923, 0x00000002);
698 nv_icmd(priv, 0x000924, 0x00000002);
699 nv_icmd(priv, 0x000925, 0x00000002);
700 nv_icmd(priv, 0x000926, 0x00000002);
701 nv_icmd(priv, 0x000927, 0x00000002);
702 nv_icmd(priv, 0x000928, 0x00000001);
703 nv_icmd(priv, 0x000929, 0x00000001);
704 nv_icmd(priv, 0x00092a, 0x00000001);
705 nv_icmd(priv, 0x00092b, 0x00000001);
706 nv_icmd(priv, 0x00092c, 0x00000001);
707 nv_icmd(priv, 0x00092d, 0x00000001);
708 nv_icmd(priv, 0x00092e, 0x00000001);
709 nv_icmd(priv, 0x00092f, 0x00000001);
710 nv_icmd(priv, 0x000648, 0x00000001);
711 nv_icmd(priv, 0x000649, 0x00000001);
712 nv_icmd(priv, 0x00064a, 0x00000001);
713 nv_icmd(priv, 0x00064b, 0x00000001);
714 nv_icmd(priv, 0x00064c, 0x00000001);
715 nv_icmd(priv, 0x00064d, 0x00000001);
716 nv_icmd(priv, 0x00064e, 0x00000001);
717 nv_icmd(priv, 0x00064f, 0x00000001);
718 nv_icmd(priv, 0x000650, 0x00000001);
719 nv_icmd(priv, 0x000658, 0x0000000f);
720 nv_icmd(priv, 0x0007ff, 0x0000000a);
721 nv_icmd(priv, 0x00066a, 0x40000000);
722 nv_icmd(priv, 0x00066b, 0x10000000);
723 nv_icmd(priv, 0x00066c, 0xffff0000);
724 nv_icmd(priv, 0x00066d, 0xffff0000);
725 nv_icmd(priv, 0x0007af, 0x00000008);
726 nv_icmd(priv, 0x0007b0, 0x00000008);
727 nv_icmd(priv, 0x0007f6, 0x00000001);
728 nv_icmd(priv, 0x0006b2, 0x00000055);
729 nv_icmd(priv, 0x0007ad, 0x00000003);
730 nv_icmd(priv, 0x000937, 0x00000001);
731 nv_icmd(priv, 0x000971, 0x00000008);
732 nv_icmd(priv, 0x000972, 0x00000040);
733 nv_icmd(priv, 0x000973, 0x0000012c);
734 nv_icmd(priv, 0x00097c, 0x00000040);
735 nv_icmd(priv, 0x000979, 0x00000003);
736 nv_icmd(priv, 0x000975, 0x00000020);
737 nv_icmd(priv, 0x000976, 0x00000001);
738 nv_icmd(priv, 0x000977, 0x00000020);
739 nv_icmd(priv, 0x000978, 0x00000001);
740 nv_icmd(priv, 0x000957, 0x00000003);
741 nv_icmd(priv, 0x00095e, 0x20164010);
742 nv_icmd(priv, 0x00095f, 0x00000020);
743 nv_icmd(priv, 0x00097d, 0x00000020);
744 nv_icmd(priv, 0x000683, 0x00000006);
745 nv_icmd(priv, 0x000685, 0x003fffff);
746 nv_icmd(priv, 0x000687, 0x003fffff);
747 nv_icmd(priv, 0x0006a0, 0x00000005);
748 nv_icmd(priv, 0x000840, 0x00400008);
749 nv_icmd(priv, 0x000841, 0x08000080);
750 nv_icmd(priv, 0x000842, 0x00400008);
751 nv_icmd(priv, 0x000843, 0x08000080);
752 nv_icmd(priv, 0x000818, 0x00000000);
753 nv_icmd(priv, 0x000819, 0x00000000);
754 nv_icmd(priv, 0x00081a, 0x00000000);
755 nv_icmd(priv, 0x00081b, 0x00000000);
756 nv_icmd(priv, 0x00081c, 0x00000000);
757 nv_icmd(priv, 0x00081d, 0x00000000);
758 nv_icmd(priv, 0x00081e, 0x00000000);
759 nv_icmd(priv, 0x00081f, 0x00000000);
760 nv_icmd(priv, 0x000848, 0x00000000);
761 nv_icmd(priv, 0x000849, 0x00000000);
762 nv_icmd(priv, 0x00084a, 0x00000000);
763 nv_icmd(priv, 0x00084b, 0x00000000);
764 nv_icmd(priv, 0x00084c, 0x00000000);
765 nv_icmd(priv, 0x00084d, 0x00000000);
766 nv_icmd(priv, 0x00084e, 0x00000000);
767 nv_icmd(priv, 0x00084f, 0x00000000);
768 nv_icmd(priv, 0x000850, 0x00000000);
769 nv_icmd(priv, 0x000851, 0x00000000);
770 nv_icmd(priv, 0x000852, 0x00000000);
771 nv_icmd(priv, 0x000853, 0x00000000);
772 nv_icmd(priv, 0x000854, 0x00000000);
773 nv_icmd(priv, 0x000855, 0x00000000);
774 nv_icmd(priv, 0x000856, 0x00000000);
775 nv_icmd(priv, 0x000857, 0x00000000);
776 nv_icmd(priv, 0x000738, 0x00000000);
777 nv_icmd(priv, 0x0006aa, 0x00000001);
778 nv_icmd(priv, 0x0006ab, 0x00000002);
779 nv_icmd(priv, 0x0006ac, 0x00000080);
780 nv_icmd(priv, 0x0006ad, 0x00000100);
781 nv_icmd(priv, 0x0006ae, 0x00000100);
782 nv_icmd(priv, 0x0006b1, 0x00000011);
783 nv_icmd(priv, 0x0006bb, 0x000000cf);
784 nv_icmd(priv, 0x0006ce, 0x2a712488);
785 nv_icmd(priv, 0x000739, 0x4085c000);
786 nv_icmd(priv, 0x00073a, 0x00000080);
787 nv_icmd(priv, 0x000786, 0x80000100);
788 nv_icmd(priv, 0x00073c, 0x00010100);
789 nv_icmd(priv, 0x00073d, 0x02800000);
790 nv_icmd(priv, 0x000787, 0x000000cf);
791 nv_icmd(priv, 0x00078c, 0x00000008);
792 nv_icmd(priv, 0x000792, 0x00000001);
793 nv_icmd(priv, 0x000794, 0x00000001);
794 nv_icmd(priv, 0x000795, 0x00000001);
795 nv_icmd(priv, 0x000796, 0x00000001);
796 nv_icmd(priv, 0x000797, 0x000000cf);
797 nv_icmd(priv, 0x000836, 0x00000001);
798 nv_icmd(priv, 0x00079a, 0x00000002);
799 nv_icmd(priv, 0x000833, 0x04444480);
800 nv_icmd(priv, 0x0007a1, 0x00000001);
801 nv_icmd(priv, 0x0007a3, 0x00000001);
802 nv_icmd(priv, 0x0007a4, 0x00000001);
803 nv_icmd(priv, 0x0007a5, 0x00000001);
804 nv_icmd(priv, 0x000831, 0x00000004);
805 nv_icmd(priv, 0x000b07, 0x00000002);
806 nv_icmd(priv, 0x000b08, 0x00000100);
807 nv_icmd(priv, 0x000b09, 0x00000100);
808 nv_icmd(priv, 0x000b0a, 0x00000001);
809 nv_icmd(priv, 0x000a04, 0x000000ff);
810 nv_icmd(priv, 0x000a0b, 0x00000040);
811 nv_icmd(priv, 0x00097f, 0x00000100);
812 nv_icmd(priv, 0x000a02, 0x00000001);
813 nv_icmd(priv, 0x000809, 0x00000007);
814 nv_icmd(priv, 0x00c221, 0x00000040);
815 nv_icmd(priv, 0x00c1b0, 0x0000000f);
816 nv_icmd(priv, 0x00c1b1, 0x0000000f);
817 nv_icmd(priv, 0x00c1b2, 0x0000000f);
818 nv_icmd(priv, 0x00c1b3, 0x0000000f);
819 nv_icmd(priv, 0x00c1b4, 0x0000000f);
820 nv_icmd(priv, 0x00c1b5, 0x0000000f);
821 nv_icmd(priv, 0x00c1b6, 0x0000000f);
822 nv_icmd(priv, 0x00c1b7, 0x0000000f);
823 nv_icmd(priv, 0x00c1b8, 0x0fac6881);
824 nv_icmd(priv, 0x00c1b9, 0x00fac688);
825 nv_icmd(priv, 0x00c401, 0x00000001);
826 nv_icmd(priv, 0x00c402, 0x00010001);
827 nv_icmd(priv, 0x00c403, 0x00000001);
828 nv_icmd(priv, 0x00c404, 0x00000001);
829 nv_icmd(priv, 0x00c40e, 0x00000020);
830 nv_icmd(priv, 0x00c500, 0x00000003);
831 nv_icmd(priv, 0x01e100, 0x00000001);
832 nv_icmd(priv, 0x001000, 0x00000002);
833 nv_icmd(priv, 0x0006aa, 0x00000001);
834 nv_icmd(priv, 0x0006ad, 0x00000100);
835 nv_icmd(priv, 0x0006ae, 0x00000100);
836 nv_icmd(priv, 0x0006b1, 0x00000011);
837 nv_icmd(priv, 0x00078c, 0x00000008);
838 nv_icmd(priv, 0x000792, 0x00000001);
839 nv_icmd(priv, 0x000794, 0x00000001);
840 nv_icmd(priv, 0x000795, 0x00000001);
841 nv_icmd(priv, 0x000796, 0x00000001);
842 nv_icmd(priv, 0x000797, 0x000000cf);
843 nv_icmd(priv, 0x00079a, 0x00000002);
844 nv_icmd(priv, 0x000833, 0x04444480);
845 nv_icmd(priv, 0x0007a1, 0x00000001);
846 nv_icmd(priv, 0x0007a3, 0x00000001);
847 nv_icmd(priv, 0x0007a4, 0x00000001);
848 nv_icmd(priv, 0x0007a5, 0x00000001);
849 nv_icmd(priv, 0x000831, 0x00000004);
850 nv_icmd(priv, 0x01e100, 0x00000001);
851 nv_icmd(priv, 0x001000, 0x00000008);
852 nv_icmd(priv, 0x000039, 0x00000000);
853 nv_icmd(priv, 0x00003a, 0x00000000);
854 nv_icmd(priv, 0x00003b, 0x00000000);
855 nv_icmd(priv, 0x000380, 0x00000001);
856 nv_icmd(priv, 0x000366, 0x00000000);
857 nv_icmd(priv, 0x000367, 0x00000000);
858 nv_icmd(priv, 0x000368, 0x00000fff);
859 nv_icmd(priv, 0x000370, 0x00000000);
860 nv_icmd(priv, 0x000371, 0x00000000);
861 nv_icmd(priv, 0x000372, 0x000fffff);
862 nv_icmd(priv, 0x000813, 0x00000006);
863 nv_icmd(priv, 0x000814, 0x00000008);
864 nv_icmd(priv, 0x000957, 0x00000003);
865 nv_icmd(priv, 0x000818, 0x00000000);
866 nv_icmd(priv, 0x000819, 0x00000000);
867 nv_icmd(priv, 0x00081a, 0x00000000);
868 nv_icmd(priv, 0x00081b, 0x00000000);
869 nv_icmd(priv, 0x00081c, 0x00000000);
870 nv_icmd(priv, 0x00081d, 0x00000000);
871 nv_icmd(priv, 0x00081e, 0x00000000);
872 nv_icmd(priv, 0x00081f, 0x00000000);
873 nv_icmd(priv, 0x000848, 0x00000000);
874 nv_icmd(priv, 0x000849, 0x00000000);
875 nv_icmd(priv, 0x00084a, 0x00000000);
876 nv_icmd(priv, 0x00084b, 0x00000000);
877 nv_icmd(priv, 0x00084c, 0x00000000);
878 nv_icmd(priv, 0x00084d, 0x00000000);
879 nv_icmd(priv, 0x00084e, 0x00000000);
880 nv_icmd(priv, 0x00084f, 0x00000000);
881 nv_icmd(priv, 0x000850, 0x00000000);
882 nv_icmd(priv, 0x000851, 0x00000000);
883 nv_icmd(priv, 0x000852, 0x00000000);
884 nv_icmd(priv, 0x000853, 0x00000000);
885 nv_icmd(priv, 0x000854, 0x00000000);
886 nv_icmd(priv, 0x000855, 0x00000000);
887 nv_icmd(priv, 0x000856, 0x00000000);
888 nv_icmd(priv, 0x000857, 0x00000000);
889 nv_icmd(priv, 0x000738, 0x00000000);
890 nv_icmd(priv, 0x000b07, 0x00000002);
891 nv_icmd(priv, 0x000b08, 0x00000100);
892 nv_icmd(priv, 0x000b09, 0x00000100);
893 nv_icmd(priv, 0x000b0a, 0x00000001);
894 nv_icmd(priv, 0x000a04, 0x000000ff);
895 nv_icmd(priv, 0x00097f, 0x00000100);
896 nv_icmd(priv, 0x000a02, 0x00000001);
897 nv_icmd(priv, 0x000809, 0x00000007);
898 nv_icmd(priv, 0x00c221, 0x00000040);
899 nv_icmd(priv, 0x00c401, 0x00000001);
900 nv_icmd(priv, 0x00c402, 0x00010001);
901 nv_icmd(priv, 0x00c403, 0x00000001);
902 nv_icmd(priv, 0x00c404, 0x00000001);
903 nv_icmd(priv, 0x00c40e, 0x00000020);
904 nv_icmd(priv, 0x00c500, 0x00000003);
905 nv_icmd(priv, 0x01e100, 0x00000001);
906 nv_icmd(priv, 0x001000, 0x00000001);
907 nv_icmd(priv, 0x000b07, 0x00000002);
908 nv_icmd(priv, 0x000b08, 0x00000100);
909 nv_icmd(priv, 0x000b09, 0x00000100);
910 nv_icmd(priv, 0x000b0a, 0x00000001);
911 nv_icmd(priv, 0x01e100, 0x00000001);
912 nv_wr32(priv, 0x400208, 0x00000000);
913}
914
915static void
916nve0_grctx_generate_a097(struct nvc0_graph_priv *priv)
917{
918 nv_mthd(priv, 0xa097, 0x0800, 0x00000000);
919 nv_mthd(priv, 0xa097, 0x0840, 0x00000000);
920 nv_mthd(priv, 0xa097, 0x0880, 0x00000000);
921 nv_mthd(priv, 0xa097, 0x08c0, 0x00000000);
922 nv_mthd(priv, 0xa097, 0x0900, 0x00000000);
923 nv_mthd(priv, 0xa097, 0x0940, 0x00000000);
924 nv_mthd(priv, 0xa097, 0x0980, 0x00000000);
925 nv_mthd(priv, 0xa097, 0x09c0, 0x00000000);
926 nv_mthd(priv, 0xa097, 0x0804, 0x00000000);
927 nv_mthd(priv, 0xa097, 0x0844, 0x00000000);
928 nv_mthd(priv, 0xa097, 0x0884, 0x00000000);
929 nv_mthd(priv, 0xa097, 0x08c4, 0x00000000);
930 nv_mthd(priv, 0xa097, 0x0904, 0x00000000);
931 nv_mthd(priv, 0xa097, 0x0944, 0x00000000);
932 nv_mthd(priv, 0xa097, 0x0984, 0x00000000);
933 nv_mthd(priv, 0xa097, 0x09c4, 0x00000000);
934 nv_mthd(priv, 0xa097, 0x0808, 0x00000400);
935 nv_mthd(priv, 0xa097, 0x0848, 0x00000400);
936 nv_mthd(priv, 0xa097, 0x0888, 0x00000400);
937 nv_mthd(priv, 0xa097, 0x08c8, 0x00000400);
938 nv_mthd(priv, 0xa097, 0x0908, 0x00000400);
939 nv_mthd(priv, 0xa097, 0x0948, 0x00000400);
940 nv_mthd(priv, 0xa097, 0x0988, 0x00000400);
941 nv_mthd(priv, 0xa097, 0x09c8, 0x00000400);
942 nv_mthd(priv, 0xa097, 0x080c, 0x00000300);
943 nv_mthd(priv, 0xa097, 0x084c, 0x00000300);
944 nv_mthd(priv, 0xa097, 0x088c, 0x00000300);
945 nv_mthd(priv, 0xa097, 0x08cc, 0x00000300);
946 nv_mthd(priv, 0xa097, 0x090c, 0x00000300);
947 nv_mthd(priv, 0xa097, 0x094c, 0x00000300);
948 nv_mthd(priv, 0xa097, 0x098c, 0x00000300);
949 nv_mthd(priv, 0xa097, 0x09cc, 0x00000300);
950 nv_mthd(priv, 0xa097, 0x0810, 0x000000cf);
951 nv_mthd(priv, 0xa097, 0x0850, 0x00000000);
952 nv_mthd(priv, 0xa097, 0x0890, 0x00000000);
953 nv_mthd(priv, 0xa097, 0x08d0, 0x00000000);
954 nv_mthd(priv, 0xa097, 0x0910, 0x00000000);
955 nv_mthd(priv, 0xa097, 0x0950, 0x00000000);
956 nv_mthd(priv, 0xa097, 0x0990, 0x00000000);
957 nv_mthd(priv, 0xa097, 0x09d0, 0x00000000);
958 nv_mthd(priv, 0xa097, 0x0814, 0x00000040);
959 nv_mthd(priv, 0xa097, 0x0854, 0x00000040);
960 nv_mthd(priv, 0xa097, 0x0894, 0x00000040);
961 nv_mthd(priv, 0xa097, 0x08d4, 0x00000040);
962 nv_mthd(priv, 0xa097, 0x0914, 0x00000040);
963 nv_mthd(priv, 0xa097, 0x0954, 0x00000040);
964 nv_mthd(priv, 0xa097, 0x0994, 0x00000040);
965 nv_mthd(priv, 0xa097, 0x09d4, 0x00000040);
966 nv_mthd(priv, 0xa097, 0x0818, 0x00000001);
967 nv_mthd(priv, 0xa097, 0x0858, 0x00000001);
968 nv_mthd(priv, 0xa097, 0x0898, 0x00000001);
969 nv_mthd(priv, 0xa097, 0x08d8, 0x00000001);
970 nv_mthd(priv, 0xa097, 0x0918, 0x00000001);
971 nv_mthd(priv, 0xa097, 0x0958, 0x00000001);
972 nv_mthd(priv, 0xa097, 0x0998, 0x00000001);
973 nv_mthd(priv, 0xa097, 0x09d8, 0x00000001);
974 nv_mthd(priv, 0xa097, 0x081c, 0x00000000);
975 nv_mthd(priv, 0xa097, 0x085c, 0x00000000);
976 nv_mthd(priv, 0xa097, 0x089c, 0x00000000);
977 nv_mthd(priv, 0xa097, 0x08dc, 0x00000000);
978 nv_mthd(priv, 0xa097, 0x091c, 0x00000000);
979 nv_mthd(priv, 0xa097, 0x095c, 0x00000000);
980 nv_mthd(priv, 0xa097, 0x099c, 0x00000000);
981 nv_mthd(priv, 0xa097, 0x09dc, 0x00000000);
982 nv_mthd(priv, 0xa097, 0x0820, 0x00000000);
983 nv_mthd(priv, 0xa097, 0x0860, 0x00000000);
984 nv_mthd(priv, 0xa097, 0x08a0, 0x00000000);
985 nv_mthd(priv, 0xa097, 0x08e0, 0x00000000);
986 nv_mthd(priv, 0xa097, 0x0920, 0x00000000);
987 nv_mthd(priv, 0xa097, 0x0960, 0x00000000);
988 nv_mthd(priv, 0xa097, 0x09a0, 0x00000000);
989 nv_mthd(priv, 0xa097, 0x09e0, 0x00000000);
990 nv_mthd(priv, 0xa097, 0x1c00, 0x00000000);
991 nv_mthd(priv, 0xa097, 0x1c10, 0x00000000);
992 nv_mthd(priv, 0xa097, 0x1c20, 0x00000000);
993 nv_mthd(priv, 0xa097, 0x1c30, 0x00000000);
994 nv_mthd(priv, 0xa097, 0x1c40, 0x00000000);
995 nv_mthd(priv, 0xa097, 0x1c50, 0x00000000);
996 nv_mthd(priv, 0xa097, 0x1c60, 0x00000000);
997 nv_mthd(priv, 0xa097, 0x1c70, 0x00000000);
998 nv_mthd(priv, 0xa097, 0x1c80, 0x00000000);
999 nv_mthd(priv, 0xa097, 0x1c90, 0x00000000);
1000 nv_mthd(priv, 0xa097, 0x1ca0, 0x00000000);
1001 nv_mthd(priv, 0xa097, 0x1cb0, 0x00000000);
1002 nv_mthd(priv, 0xa097, 0x1cc0, 0x00000000);
1003 nv_mthd(priv, 0xa097, 0x1cd0, 0x00000000);
1004 nv_mthd(priv, 0xa097, 0x1ce0, 0x00000000);
1005 nv_mthd(priv, 0xa097, 0x1cf0, 0x00000000);
1006 nv_mthd(priv, 0xa097, 0x1c04, 0x00000000);
1007 nv_mthd(priv, 0xa097, 0x1c14, 0x00000000);
1008 nv_mthd(priv, 0xa097, 0x1c24, 0x00000000);
1009 nv_mthd(priv, 0xa097, 0x1c34, 0x00000000);
1010 nv_mthd(priv, 0xa097, 0x1c44, 0x00000000);
1011 nv_mthd(priv, 0xa097, 0x1c54, 0x00000000);
1012 nv_mthd(priv, 0xa097, 0x1c64, 0x00000000);
1013 nv_mthd(priv, 0xa097, 0x1c74, 0x00000000);
1014 nv_mthd(priv, 0xa097, 0x1c84, 0x00000000);
1015 nv_mthd(priv, 0xa097, 0x1c94, 0x00000000);
1016 nv_mthd(priv, 0xa097, 0x1ca4, 0x00000000);
1017 nv_mthd(priv, 0xa097, 0x1cb4, 0x00000000);
1018 nv_mthd(priv, 0xa097, 0x1cc4, 0x00000000);
1019 nv_mthd(priv, 0xa097, 0x1cd4, 0x00000000);
1020 nv_mthd(priv, 0xa097, 0x1ce4, 0x00000000);
1021 nv_mthd(priv, 0xa097, 0x1cf4, 0x00000000);
1022 nv_mthd(priv, 0xa097, 0x1c08, 0x00000000);
1023 nv_mthd(priv, 0xa097, 0x1c18, 0x00000000);
1024 nv_mthd(priv, 0xa097, 0x1c28, 0x00000000);
1025 nv_mthd(priv, 0xa097, 0x1c38, 0x00000000);
1026 nv_mthd(priv, 0xa097, 0x1c48, 0x00000000);
1027 nv_mthd(priv, 0xa097, 0x1c58, 0x00000000);
1028 nv_mthd(priv, 0xa097, 0x1c68, 0x00000000);
1029 nv_mthd(priv, 0xa097, 0x1c78, 0x00000000);
1030 nv_mthd(priv, 0xa097, 0x1c88, 0x00000000);
1031 nv_mthd(priv, 0xa097, 0x1c98, 0x00000000);
1032 nv_mthd(priv, 0xa097, 0x1ca8, 0x00000000);
1033 nv_mthd(priv, 0xa097, 0x1cb8, 0x00000000);
1034 nv_mthd(priv, 0xa097, 0x1cc8, 0x00000000);
1035 nv_mthd(priv, 0xa097, 0x1cd8, 0x00000000);
1036 nv_mthd(priv, 0xa097, 0x1ce8, 0x00000000);
1037 nv_mthd(priv, 0xa097, 0x1cf8, 0x00000000);
1038 nv_mthd(priv, 0xa097, 0x1c0c, 0x00000000);
1039 nv_mthd(priv, 0xa097, 0x1c1c, 0x00000000);
1040 nv_mthd(priv, 0xa097, 0x1c2c, 0x00000000);
1041 nv_mthd(priv, 0xa097, 0x1c3c, 0x00000000);
1042 nv_mthd(priv, 0xa097, 0x1c4c, 0x00000000);
1043 nv_mthd(priv, 0xa097, 0x1c5c, 0x00000000);
1044 nv_mthd(priv, 0xa097, 0x1c6c, 0x00000000);
1045 nv_mthd(priv, 0xa097, 0x1c7c, 0x00000000);
1046 nv_mthd(priv, 0xa097, 0x1c8c, 0x00000000);
1047 nv_mthd(priv, 0xa097, 0x1c9c, 0x00000000);
1048 nv_mthd(priv, 0xa097, 0x1cac, 0x00000000);
1049 nv_mthd(priv, 0xa097, 0x1cbc, 0x00000000);
1050 nv_mthd(priv, 0xa097, 0x1ccc, 0x00000000);
1051 nv_mthd(priv, 0xa097, 0x1cdc, 0x00000000);
1052 nv_mthd(priv, 0xa097, 0x1cec, 0x00000000);
1053 nv_mthd(priv, 0xa097, 0x1cfc, 0x00000000);
1054 nv_mthd(priv, 0xa097, 0x1d00, 0x00000000);
1055 nv_mthd(priv, 0xa097, 0x1d10, 0x00000000);
1056 nv_mthd(priv, 0xa097, 0x1d20, 0x00000000);
1057 nv_mthd(priv, 0xa097, 0x1d30, 0x00000000);
1058 nv_mthd(priv, 0xa097, 0x1d40, 0x00000000);
1059 nv_mthd(priv, 0xa097, 0x1d50, 0x00000000);
1060 nv_mthd(priv, 0xa097, 0x1d60, 0x00000000);
1061 nv_mthd(priv, 0xa097, 0x1d70, 0x00000000);
1062 nv_mthd(priv, 0xa097, 0x1d80, 0x00000000);
1063 nv_mthd(priv, 0xa097, 0x1d90, 0x00000000);
1064 nv_mthd(priv, 0xa097, 0x1da0, 0x00000000);
1065 nv_mthd(priv, 0xa097, 0x1db0, 0x00000000);
1066 nv_mthd(priv, 0xa097, 0x1dc0, 0x00000000);
1067 nv_mthd(priv, 0xa097, 0x1dd0, 0x00000000);
1068 nv_mthd(priv, 0xa097, 0x1de0, 0x00000000);
1069 nv_mthd(priv, 0xa097, 0x1df0, 0x00000000);
1070 nv_mthd(priv, 0xa097, 0x1d04, 0x00000000);
1071 nv_mthd(priv, 0xa097, 0x1d14, 0x00000000);
1072 nv_mthd(priv, 0xa097, 0x1d24, 0x00000000);
1073 nv_mthd(priv, 0xa097, 0x1d34, 0x00000000);
1074 nv_mthd(priv, 0xa097, 0x1d44, 0x00000000);
1075 nv_mthd(priv, 0xa097, 0x1d54, 0x00000000);
1076 nv_mthd(priv, 0xa097, 0x1d64, 0x00000000);
1077 nv_mthd(priv, 0xa097, 0x1d74, 0x00000000);
1078 nv_mthd(priv, 0xa097, 0x1d84, 0x00000000);
1079 nv_mthd(priv, 0xa097, 0x1d94, 0x00000000);
1080 nv_mthd(priv, 0xa097, 0x1da4, 0x00000000);
1081 nv_mthd(priv, 0xa097, 0x1db4, 0x00000000);
1082 nv_mthd(priv, 0xa097, 0x1dc4, 0x00000000);
1083 nv_mthd(priv, 0xa097, 0x1dd4, 0x00000000);
1084 nv_mthd(priv, 0xa097, 0x1de4, 0x00000000);
1085 nv_mthd(priv, 0xa097, 0x1df4, 0x00000000);
1086 nv_mthd(priv, 0xa097, 0x1d08, 0x00000000);
1087 nv_mthd(priv, 0xa097, 0x1d18, 0x00000000);
1088 nv_mthd(priv, 0xa097, 0x1d28, 0x00000000);
1089 nv_mthd(priv, 0xa097, 0x1d38, 0x00000000);
1090 nv_mthd(priv, 0xa097, 0x1d48, 0x00000000);
1091 nv_mthd(priv, 0xa097, 0x1d58, 0x00000000);
1092 nv_mthd(priv, 0xa097, 0x1d68, 0x00000000);
1093 nv_mthd(priv, 0xa097, 0x1d78, 0x00000000);
1094 nv_mthd(priv, 0xa097, 0x1d88, 0x00000000);
1095 nv_mthd(priv, 0xa097, 0x1d98, 0x00000000);
1096 nv_mthd(priv, 0xa097, 0x1da8, 0x00000000);
1097 nv_mthd(priv, 0xa097, 0x1db8, 0x00000000);
1098 nv_mthd(priv, 0xa097, 0x1dc8, 0x00000000);
1099 nv_mthd(priv, 0xa097, 0x1dd8, 0x00000000);
1100 nv_mthd(priv, 0xa097, 0x1de8, 0x00000000);
1101 nv_mthd(priv, 0xa097, 0x1df8, 0x00000000);
1102 nv_mthd(priv, 0xa097, 0x1d0c, 0x00000000);
1103 nv_mthd(priv, 0xa097, 0x1d1c, 0x00000000);
1104 nv_mthd(priv, 0xa097, 0x1d2c, 0x00000000);
1105 nv_mthd(priv, 0xa097, 0x1d3c, 0x00000000);
1106 nv_mthd(priv, 0xa097, 0x1d4c, 0x00000000);
1107 nv_mthd(priv, 0xa097, 0x1d5c, 0x00000000);
1108 nv_mthd(priv, 0xa097, 0x1d6c, 0x00000000);
1109 nv_mthd(priv, 0xa097, 0x1d7c, 0x00000000);
1110 nv_mthd(priv, 0xa097, 0x1d8c, 0x00000000);
1111 nv_mthd(priv, 0xa097, 0x1d9c, 0x00000000);
1112 nv_mthd(priv, 0xa097, 0x1dac, 0x00000000);
1113 nv_mthd(priv, 0xa097, 0x1dbc, 0x00000000);
1114 nv_mthd(priv, 0xa097, 0x1dcc, 0x00000000);
1115 nv_mthd(priv, 0xa097, 0x1ddc, 0x00000000);
1116 nv_mthd(priv, 0xa097, 0x1dec, 0x00000000);
1117 nv_mthd(priv, 0xa097, 0x1dfc, 0x00000000);
1118 nv_mthd(priv, 0xa097, 0x1f00, 0x00000000);
1119 nv_mthd(priv, 0xa097, 0x1f08, 0x00000000);
1120 nv_mthd(priv, 0xa097, 0x1f10, 0x00000000);
1121 nv_mthd(priv, 0xa097, 0x1f18, 0x00000000);
1122 nv_mthd(priv, 0xa097, 0x1f20, 0x00000000);
1123 nv_mthd(priv, 0xa097, 0x1f28, 0x00000000);
1124 nv_mthd(priv, 0xa097, 0x1f30, 0x00000000);
1125 nv_mthd(priv, 0xa097, 0x1f38, 0x00000000);
1126 nv_mthd(priv, 0xa097, 0x1f40, 0x00000000);
1127 nv_mthd(priv, 0xa097, 0x1f48, 0x00000000);
1128 nv_mthd(priv, 0xa097, 0x1f50, 0x00000000);
1129 nv_mthd(priv, 0xa097, 0x1f58, 0x00000000);
1130 nv_mthd(priv, 0xa097, 0x1f60, 0x00000000);
1131 nv_mthd(priv, 0xa097, 0x1f68, 0x00000000);
1132 nv_mthd(priv, 0xa097, 0x1f70, 0x00000000);
1133 nv_mthd(priv, 0xa097, 0x1f78, 0x00000000);
1134 nv_mthd(priv, 0xa097, 0x1f04, 0x00000000);
1135 nv_mthd(priv, 0xa097, 0x1f0c, 0x00000000);
1136 nv_mthd(priv, 0xa097, 0x1f14, 0x00000000);
1137 nv_mthd(priv, 0xa097, 0x1f1c, 0x00000000);
1138 nv_mthd(priv, 0xa097, 0x1f24, 0x00000000);
1139 nv_mthd(priv, 0xa097, 0x1f2c, 0x00000000);
1140 nv_mthd(priv, 0xa097, 0x1f34, 0x00000000);
1141 nv_mthd(priv, 0xa097, 0x1f3c, 0x00000000);
1142 nv_mthd(priv, 0xa097, 0x1f44, 0x00000000);
1143 nv_mthd(priv, 0xa097, 0x1f4c, 0x00000000);
1144 nv_mthd(priv, 0xa097, 0x1f54, 0x00000000);
1145 nv_mthd(priv, 0xa097, 0x1f5c, 0x00000000);
1146 nv_mthd(priv, 0xa097, 0x1f64, 0x00000000);
1147 nv_mthd(priv, 0xa097, 0x1f6c, 0x00000000);
1148 nv_mthd(priv, 0xa097, 0x1f74, 0x00000000);
1149 nv_mthd(priv, 0xa097, 0x1f7c, 0x00000000);
1150 nv_mthd(priv, 0xa097, 0x1f80, 0x00000000);
1151 nv_mthd(priv, 0xa097, 0x1f88, 0x00000000);
1152 nv_mthd(priv, 0xa097, 0x1f90, 0x00000000);
1153 nv_mthd(priv, 0xa097, 0x1f98, 0x00000000);
1154 nv_mthd(priv, 0xa097, 0x1fa0, 0x00000000);
1155 nv_mthd(priv, 0xa097, 0x1fa8, 0x00000000);
1156 nv_mthd(priv, 0xa097, 0x1fb0, 0x00000000);
1157 nv_mthd(priv, 0xa097, 0x1fb8, 0x00000000);
1158 nv_mthd(priv, 0xa097, 0x1fc0, 0x00000000);
1159 nv_mthd(priv, 0xa097, 0x1fc8, 0x00000000);
1160 nv_mthd(priv, 0xa097, 0x1fd0, 0x00000000);
1161 nv_mthd(priv, 0xa097, 0x1fd8, 0x00000000);
1162 nv_mthd(priv, 0xa097, 0x1fe0, 0x00000000);
1163 nv_mthd(priv, 0xa097, 0x1fe8, 0x00000000);
1164 nv_mthd(priv, 0xa097, 0x1ff0, 0x00000000);
1165 nv_mthd(priv, 0xa097, 0x1ff8, 0x00000000);
1166 nv_mthd(priv, 0xa097, 0x1f84, 0x00000000);
1167 nv_mthd(priv, 0xa097, 0x1f8c, 0x00000000);
1168 nv_mthd(priv, 0xa097, 0x1f94, 0x00000000);
1169 nv_mthd(priv, 0xa097, 0x1f9c, 0x00000000);
1170 nv_mthd(priv, 0xa097, 0x1fa4, 0x00000000);
1171 nv_mthd(priv, 0xa097, 0x1fac, 0x00000000);
1172 nv_mthd(priv, 0xa097, 0x1fb4, 0x00000000);
1173 nv_mthd(priv, 0xa097, 0x1fbc, 0x00000000);
1174 nv_mthd(priv, 0xa097, 0x1fc4, 0x00000000);
1175 nv_mthd(priv, 0xa097, 0x1fcc, 0x00000000);
1176 nv_mthd(priv, 0xa097, 0x1fd4, 0x00000000);
1177 nv_mthd(priv, 0xa097, 0x1fdc, 0x00000000);
1178 nv_mthd(priv, 0xa097, 0x1fe4, 0x00000000);
1179 nv_mthd(priv, 0xa097, 0x1fec, 0x00000000);
1180 nv_mthd(priv, 0xa097, 0x1ff4, 0x00000000);
1181 nv_mthd(priv, 0xa097, 0x1ffc, 0x00000000);
1182 nv_mthd(priv, 0xa097, 0x2000, 0x00000000);
1183 nv_mthd(priv, 0xa097, 0x2040, 0x00000011);
1184 nv_mthd(priv, 0xa097, 0x2080, 0x00000020);
1185 nv_mthd(priv, 0xa097, 0x20c0, 0x00000030);
1186 nv_mthd(priv, 0xa097, 0x2100, 0x00000040);
1187 nv_mthd(priv, 0xa097, 0x2140, 0x00000051);
1188 nv_mthd(priv, 0xa097, 0x200c, 0x00000001);
1189 nv_mthd(priv, 0xa097, 0x204c, 0x00000001);
1190 nv_mthd(priv, 0xa097, 0x208c, 0x00000001);
1191 nv_mthd(priv, 0xa097, 0x20cc, 0x00000001);
1192 nv_mthd(priv, 0xa097, 0x210c, 0x00000001);
1193 nv_mthd(priv, 0xa097, 0x214c, 0x00000001);
1194 nv_mthd(priv, 0xa097, 0x2010, 0x00000000);
1195 nv_mthd(priv, 0xa097, 0x2050, 0x00000000);
1196 nv_mthd(priv, 0xa097, 0x2090, 0x00000001);
1197 nv_mthd(priv, 0xa097, 0x20d0, 0x00000002);
1198 nv_mthd(priv, 0xa097, 0x2110, 0x00000003);
1199 nv_mthd(priv, 0xa097, 0x2150, 0x00000004);
1200 nv_mthd(priv, 0xa097, 0x0380, 0x00000000);
1201 nv_mthd(priv, 0xa097, 0x03a0, 0x00000000);
1202 nv_mthd(priv, 0xa097, 0x03c0, 0x00000000);
1203 nv_mthd(priv, 0xa097, 0x03e0, 0x00000000);
1204 nv_mthd(priv, 0xa097, 0x0384, 0x00000000);
1205 nv_mthd(priv, 0xa097, 0x03a4, 0x00000000);
1206 nv_mthd(priv, 0xa097, 0x03c4, 0x00000000);
1207 nv_mthd(priv, 0xa097, 0x03e4, 0x00000000);
1208 nv_mthd(priv, 0xa097, 0x0388, 0x00000000);
1209 nv_mthd(priv, 0xa097, 0x03a8, 0x00000000);
1210 nv_mthd(priv, 0xa097, 0x03c8, 0x00000000);
1211 nv_mthd(priv, 0xa097, 0x03e8, 0x00000000);
1212 nv_mthd(priv, 0xa097, 0x038c, 0x00000000);
1213 nv_mthd(priv, 0xa097, 0x03ac, 0x00000000);
1214 nv_mthd(priv, 0xa097, 0x03cc, 0x00000000);
1215 nv_mthd(priv, 0xa097, 0x03ec, 0x00000000);
1216 nv_mthd(priv, 0xa097, 0x0700, 0x00000000);
1217 nv_mthd(priv, 0xa097, 0x0710, 0x00000000);
1218 nv_mthd(priv, 0xa097, 0x0720, 0x00000000);
1219 nv_mthd(priv, 0xa097, 0x0730, 0x00000000);
1220 nv_mthd(priv, 0xa097, 0x0704, 0x00000000);
1221 nv_mthd(priv, 0xa097, 0x0714, 0x00000000);
1222 nv_mthd(priv, 0xa097, 0x0724, 0x00000000);
1223 nv_mthd(priv, 0xa097, 0x0734, 0x00000000);
1224 nv_mthd(priv, 0xa097, 0x0708, 0x00000000);
1225 nv_mthd(priv, 0xa097, 0x0718, 0x00000000);
1226 nv_mthd(priv, 0xa097, 0x0728, 0x00000000);
1227 nv_mthd(priv, 0xa097, 0x0738, 0x00000000);
1228 nv_mthd(priv, 0xa097, 0x2800, 0x00000000);
1229 nv_mthd(priv, 0xa097, 0x2804, 0x00000000);
1230 nv_mthd(priv, 0xa097, 0x2808, 0x00000000);
1231 nv_mthd(priv, 0xa097, 0x280c, 0x00000000);
1232 nv_mthd(priv, 0xa097, 0x2810, 0x00000000);
1233 nv_mthd(priv, 0xa097, 0x2814, 0x00000000);
1234 nv_mthd(priv, 0xa097, 0x2818, 0x00000000);
1235 nv_mthd(priv, 0xa097, 0x281c, 0x00000000);
1236 nv_mthd(priv, 0xa097, 0x2820, 0x00000000);
1237 nv_mthd(priv, 0xa097, 0x2824, 0x00000000);
1238 nv_mthd(priv, 0xa097, 0x2828, 0x00000000);
1239 nv_mthd(priv, 0xa097, 0x282c, 0x00000000);
1240 nv_mthd(priv, 0xa097, 0x2830, 0x00000000);
1241 nv_mthd(priv, 0xa097, 0x2834, 0x00000000);
1242 nv_mthd(priv, 0xa097, 0x2838, 0x00000000);
1243 nv_mthd(priv, 0xa097, 0x283c, 0x00000000);
1244 nv_mthd(priv, 0xa097, 0x2840, 0x00000000);
1245 nv_mthd(priv, 0xa097, 0x2844, 0x00000000);
1246 nv_mthd(priv, 0xa097, 0x2848, 0x00000000);
1247 nv_mthd(priv, 0xa097, 0x284c, 0x00000000);
1248 nv_mthd(priv, 0xa097, 0x2850, 0x00000000);
1249 nv_mthd(priv, 0xa097, 0x2854, 0x00000000);
1250 nv_mthd(priv, 0xa097, 0x2858, 0x00000000);
1251 nv_mthd(priv, 0xa097, 0x285c, 0x00000000);
1252 nv_mthd(priv, 0xa097, 0x2860, 0x00000000);
1253 nv_mthd(priv, 0xa097, 0x2864, 0x00000000);
1254 nv_mthd(priv, 0xa097, 0x2868, 0x00000000);
1255 nv_mthd(priv, 0xa097, 0x286c, 0x00000000);
1256 nv_mthd(priv, 0xa097, 0x2870, 0x00000000);
1257 nv_mthd(priv, 0xa097, 0x2874, 0x00000000);
1258 nv_mthd(priv, 0xa097, 0x2878, 0x00000000);
1259 nv_mthd(priv, 0xa097, 0x287c, 0x00000000);
1260 nv_mthd(priv, 0xa097, 0x2880, 0x00000000);
1261 nv_mthd(priv, 0xa097, 0x2884, 0x00000000);
1262 nv_mthd(priv, 0xa097, 0x2888, 0x00000000);
1263 nv_mthd(priv, 0xa097, 0x288c, 0x00000000);
1264 nv_mthd(priv, 0xa097, 0x2890, 0x00000000);
1265 nv_mthd(priv, 0xa097, 0x2894, 0x00000000);
1266 nv_mthd(priv, 0xa097, 0x2898, 0x00000000);
1267 nv_mthd(priv, 0xa097, 0x289c, 0x00000000);
1268 nv_mthd(priv, 0xa097, 0x28a0, 0x00000000);
1269 nv_mthd(priv, 0xa097, 0x28a4, 0x00000000);
1270 nv_mthd(priv, 0xa097, 0x28a8, 0x00000000);
1271 nv_mthd(priv, 0xa097, 0x28ac, 0x00000000);
1272 nv_mthd(priv, 0xa097, 0x28b0, 0x00000000);
1273 nv_mthd(priv, 0xa097, 0x28b4, 0x00000000);
1274 nv_mthd(priv, 0xa097, 0x28b8, 0x00000000);
1275 nv_mthd(priv, 0xa097, 0x28bc, 0x00000000);
1276 nv_mthd(priv, 0xa097, 0x28c0, 0x00000000);
1277 nv_mthd(priv, 0xa097, 0x28c4, 0x00000000);
1278 nv_mthd(priv, 0xa097, 0x28c8, 0x00000000);
1279 nv_mthd(priv, 0xa097, 0x28cc, 0x00000000);
1280 nv_mthd(priv, 0xa097, 0x28d0, 0x00000000);
1281 nv_mthd(priv, 0xa097, 0x28d4, 0x00000000);
1282 nv_mthd(priv, 0xa097, 0x28d8, 0x00000000);
1283 nv_mthd(priv, 0xa097, 0x28dc, 0x00000000);
1284 nv_mthd(priv, 0xa097, 0x28e0, 0x00000000);
1285 nv_mthd(priv, 0xa097, 0x28e4, 0x00000000);
1286 nv_mthd(priv, 0xa097, 0x28e8, 0x00000000);
1287 nv_mthd(priv, 0xa097, 0x28ec, 0x00000000);
1288 nv_mthd(priv, 0xa097, 0x28f0, 0x00000000);
1289 nv_mthd(priv, 0xa097, 0x28f4, 0x00000000);
1290 nv_mthd(priv, 0xa097, 0x28f8, 0x00000000);
1291 nv_mthd(priv, 0xa097, 0x28fc, 0x00000000);
1292 nv_mthd(priv, 0xa097, 0x2900, 0x00000000);
1293 nv_mthd(priv, 0xa097, 0x2904, 0x00000000);
1294 nv_mthd(priv, 0xa097, 0x2908, 0x00000000);
1295 nv_mthd(priv, 0xa097, 0x290c, 0x00000000);
1296 nv_mthd(priv, 0xa097, 0x2910, 0x00000000);
1297 nv_mthd(priv, 0xa097, 0x2914, 0x00000000);
1298 nv_mthd(priv, 0xa097, 0x2918, 0x00000000);
1299 nv_mthd(priv, 0xa097, 0x291c, 0x00000000);
1300 nv_mthd(priv, 0xa097, 0x2920, 0x00000000);
1301 nv_mthd(priv, 0xa097, 0x2924, 0x00000000);
1302 nv_mthd(priv, 0xa097, 0x2928, 0x00000000);
1303 nv_mthd(priv, 0xa097, 0x292c, 0x00000000);
1304 nv_mthd(priv, 0xa097, 0x2930, 0x00000000);
1305 nv_mthd(priv, 0xa097, 0x2934, 0x00000000);
1306 nv_mthd(priv, 0xa097, 0x2938, 0x00000000);
1307 nv_mthd(priv, 0xa097, 0x293c, 0x00000000);
1308 nv_mthd(priv, 0xa097, 0x2940, 0x00000000);
1309 nv_mthd(priv, 0xa097, 0x2944, 0x00000000);
1310 nv_mthd(priv, 0xa097, 0x2948, 0x00000000);
1311 nv_mthd(priv, 0xa097, 0x294c, 0x00000000);
1312 nv_mthd(priv, 0xa097, 0x2950, 0x00000000);
1313 nv_mthd(priv, 0xa097, 0x2954, 0x00000000);
1314 nv_mthd(priv, 0xa097, 0x2958, 0x00000000);
1315 nv_mthd(priv, 0xa097, 0x295c, 0x00000000);
1316 nv_mthd(priv, 0xa097, 0x2960, 0x00000000);
1317 nv_mthd(priv, 0xa097, 0x2964, 0x00000000);
1318 nv_mthd(priv, 0xa097, 0x2968, 0x00000000);
1319 nv_mthd(priv, 0xa097, 0x296c, 0x00000000);
1320 nv_mthd(priv, 0xa097, 0x2970, 0x00000000);
1321 nv_mthd(priv, 0xa097, 0x2974, 0x00000000);
1322 nv_mthd(priv, 0xa097, 0x2978, 0x00000000);
1323 nv_mthd(priv, 0xa097, 0x297c, 0x00000000);
1324 nv_mthd(priv, 0xa097, 0x2980, 0x00000000);
1325 nv_mthd(priv, 0xa097, 0x2984, 0x00000000);
1326 nv_mthd(priv, 0xa097, 0x2988, 0x00000000);
1327 nv_mthd(priv, 0xa097, 0x298c, 0x00000000);
1328 nv_mthd(priv, 0xa097, 0x2990, 0x00000000);
1329 nv_mthd(priv, 0xa097, 0x2994, 0x00000000);
1330 nv_mthd(priv, 0xa097, 0x2998, 0x00000000);
1331 nv_mthd(priv, 0xa097, 0x299c, 0x00000000);
1332 nv_mthd(priv, 0xa097, 0x29a0, 0x00000000);
1333 nv_mthd(priv, 0xa097, 0x29a4, 0x00000000);
1334 nv_mthd(priv, 0xa097, 0x29a8, 0x00000000);
1335 nv_mthd(priv, 0xa097, 0x29ac, 0x00000000);
1336 nv_mthd(priv, 0xa097, 0x29b0, 0x00000000);
1337 nv_mthd(priv, 0xa097, 0x29b4, 0x00000000);
1338 nv_mthd(priv, 0xa097, 0x29b8, 0x00000000);
1339 nv_mthd(priv, 0xa097, 0x29bc, 0x00000000);
1340 nv_mthd(priv, 0xa097, 0x29c0, 0x00000000);
1341 nv_mthd(priv, 0xa097, 0x29c4, 0x00000000);
1342 nv_mthd(priv, 0xa097, 0x29c8, 0x00000000);
1343 nv_mthd(priv, 0xa097, 0x29cc, 0x00000000);
1344 nv_mthd(priv, 0xa097, 0x29d0, 0x00000000);
1345 nv_mthd(priv, 0xa097, 0x29d4, 0x00000000);
1346 nv_mthd(priv, 0xa097, 0x29d8, 0x00000000);
1347 nv_mthd(priv, 0xa097, 0x29dc, 0x00000000);
1348 nv_mthd(priv, 0xa097, 0x29e0, 0x00000000);
1349 nv_mthd(priv, 0xa097, 0x29e4, 0x00000000);
1350 nv_mthd(priv, 0xa097, 0x29e8, 0x00000000);
1351 nv_mthd(priv, 0xa097, 0x29ec, 0x00000000);
1352 nv_mthd(priv, 0xa097, 0x29f0, 0x00000000);
1353 nv_mthd(priv, 0xa097, 0x29f4, 0x00000000);
1354 nv_mthd(priv, 0xa097, 0x29f8, 0x00000000);
1355 nv_mthd(priv, 0xa097, 0x29fc, 0x00000000);
1356 nv_mthd(priv, 0xa097, 0x0a00, 0x00000000);
1357 nv_mthd(priv, 0xa097, 0x0a20, 0x00000000);
1358 nv_mthd(priv, 0xa097, 0x0a40, 0x00000000);
1359 nv_mthd(priv, 0xa097, 0x0a60, 0x00000000);
1360 nv_mthd(priv, 0xa097, 0x0a80, 0x00000000);
1361 nv_mthd(priv, 0xa097, 0x0aa0, 0x00000000);
1362 nv_mthd(priv, 0xa097, 0x0ac0, 0x00000000);
1363 nv_mthd(priv, 0xa097, 0x0ae0, 0x00000000);
1364 nv_mthd(priv, 0xa097, 0x0b00, 0x00000000);
1365 nv_mthd(priv, 0xa097, 0x0b20, 0x00000000);
1366 nv_mthd(priv, 0xa097, 0x0b40, 0x00000000);
1367 nv_mthd(priv, 0xa097, 0x0b60, 0x00000000);
1368 nv_mthd(priv, 0xa097, 0x0b80, 0x00000000);
1369 nv_mthd(priv, 0xa097, 0x0ba0, 0x00000000);
1370 nv_mthd(priv, 0xa097, 0x0bc0, 0x00000000);
1371 nv_mthd(priv, 0xa097, 0x0be0, 0x00000000);
1372 nv_mthd(priv, 0xa097, 0x0a04, 0x00000000);
1373 nv_mthd(priv, 0xa097, 0x0a24, 0x00000000);
1374 nv_mthd(priv, 0xa097, 0x0a44, 0x00000000);
1375 nv_mthd(priv, 0xa097, 0x0a64, 0x00000000);
1376 nv_mthd(priv, 0xa097, 0x0a84, 0x00000000);
1377 nv_mthd(priv, 0xa097, 0x0aa4, 0x00000000);
1378 nv_mthd(priv, 0xa097, 0x0ac4, 0x00000000);
1379 nv_mthd(priv, 0xa097, 0x0ae4, 0x00000000);
1380 nv_mthd(priv, 0xa097, 0x0b04, 0x00000000);
1381 nv_mthd(priv, 0xa097, 0x0b24, 0x00000000);
1382 nv_mthd(priv, 0xa097, 0x0b44, 0x00000000);
1383 nv_mthd(priv, 0xa097, 0x0b64, 0x00000000);
1384 nv_mthd(priv, 0xa097, 0x0b84, 0x00000000);
1385 nv_mthd(priv, 0xa097, 0x0ba4, 0x00000000);
1386 nv_mthd(priv, 0xa097, 0x0bc4, 0x00000000);
1387 nv_mthd(priv, 0xa097, 0x0be4, 0x00000000);
1388 nv_mthd(priv, 0xa097, 0x0a08, 0x00000000);
1389 nv_mthd(priv, 0xa097, 0x0a28, 0x00000000);
1390 nv_mthd(priv, 0xa097, 0x0a48, 0x00000000);
1391 nv_mthd(priv, 0xa097, 0x0a68, 0x00000000);
1392 nv_mthd(priv, 0xa097, 0x0a88, 0x00000000);
1393 nv_mthd(priv, 0xa097, 0x0aa8, 0x00000000);
1394 nv_mthd(priv, 0xa097, 0x0ac8, 0x00000000);
1395 nv_mthd(priv, 0xa097, 0x0ae8, 0x00000000);
1396 nv_mthd(priv, 0xa097, 0x0b08, 0x00000000);
1397 nv_mthd(priv, 0xa097, 0x0b28, 0x00000000);
1398 nv_mthd(priv, 0xa097, 0x0b48, 0x00000000);
1399 nv_mthd(priv, 0xa097, 0x0b68, 0x00000000);
1400 nv_mthd(priv, 0xa097, 0x0b88, 0x00000000);
1401 nv_mthd(priv, 0xa097, 0x0ba8, 0x00000000);
1402 nv_mthd(priv, 0xa097, 0x0bc8, 0x00000000);
1403 nv_mthd(priv, 0xa097, 0x0be8, 0x00000000);
1404 nv_mthd(priv, 0xa097, 0x0a0c, 0x00000000);
1405 nv_mthd(priv, 0xa097, 0x0a2c, 0x00000000);
1406 nv_mthd(priv, 0xa097, 0x0a4c, 0x00000000);
1407 nv_mthd(priv, 0xa097, 0x0a6c, 0x00000000);
1408 nv_mthd(priv, 0xa097, 0x0a8c, 0x00000000);
1409 nv_mthd(priv, 0xa097, 0x0aac, 0x00000000);
1410 nv_mthd(priv, 0xa097, 0x0acc, 0x00000000);
1411 nv_mthd(priv, 0xa097, 0x0aec, 0x00000000);
1412 nv_mthd(priv, 0xa097, 0x0b0c, 0x00000000);
1413 nv_mthd(priv, 0xa097, 0x0b2c, 0x00000000);
1414 nv_mthd(priv, 0xa097, 0x0b4c, 0x00000000);
1415 nv_mthd(priv, 0xa097, 0x0b6c, 0x00000000);
1416 nv_mthd(priv, 0xa097, 0x0b8c, 0x00000000);
1417 nv_mthd(priv, 0xa097, 0x0bac, 0x00000000);
1418 nv_mthd(priv, 0xa097, 0x0bcc, 0x00000000);
1419 nv_mthd(priv, 0xa097, 0x0bec, 0x00000000);
1420 nv_mthd(priv, 0xa097, 0x0a10, 0x00000000);
1421 nv_mthd(priv, 0xa097, 0x0a30, 0x00000000);
1422 nv_mthd(priv, 0xa097, 0x0a50, 0x00000000);
1423 nv_mthd(priv, 0xa097, 0x0a70, 0x00000000);
1424 nv_mthd(priv, 0xa097, 0x0a90, 0x00000000);
1425 nv_mthd(priv, 0xa097, 0x0ab0, 0x00000000);
1426 nv_mthd(priv, 0xa097, 0x0ad0, 0x00000000);
1427 nv_mthd(priv, 0xa097, 0x0af0, 0x00000000);
1428 nv_mthd(priv, 0xa097, 0x0b10, 0x00000000);
1429 nv_mthd(priv, 0xa097, 0x0b30, 0x00000000);
1430 nv_mthd(priv, 0xa097, 0x0b50, 0x00000000);
1431 nv_mthd(priv, 0xa097, 0x0b70, 0x00000000);
1432 nv_mthd(priv, 0xa097, 0x0b90, 0x00000000);
1433 nv_mthd(priv, 0xa097, 0x0bb0, 0x00000000);
1434 nv_mthd(priv, 0xa097, 0x0bd0, 0x00000000);
1435 nv_mthd(priv, 0xa097, 0x0bf0, 0x00000000);
1436 nv_mthd(priv, 0xa097, 0x0a14, 0x00000000);
1437 nv_mthd(priv, 0xa097, 0x0a34, 0x00000000);
1438 nv_mthd(priv, 0xa097, 0x0a54, 0x00000000);
1439 nv_mthd(priv, 0xa097, 0x0a74, 0x00000000);
1440 nv_mthd(priv, 0xa097, 0x0a94, 0x00000000);
1441 nv_mthd(priv, 0xa097, 0x0ab4, 0x00000000);
1442 nv_mthd(priv, 0xa097, 0x0ad4, 0x00000000);
1443 nv_mthd(priv, 0xa097, 0x0af4, 0x00000000);
1444 nv_mthd(priv, 0xa097, 0x0b14, 0x00000000);
1445 nv_mthd(priv, 0xa097, 0x0b34, 0x00000000);
1446 nv_mthd(priv, 0xa097, 0x0b54, 0x00000000);
1447 nv_mthd(priv, 0xa097, 0x0b74, 0x00000000);
1448 nv_mthd(priv, 0xa097, 0x0b94, 0x00000000);
1449 nv_mthd(priv, 0xa097, 0x0bb4, 0x00000000);
1450 nv_mthd(priv, 0xa097, 0x0bd4, 0x00000000);
1451 nv_mthd(priv, 0xa097, 0x0bf4, 0x00000000);
1452 nv_mthd(priv, 0xa097, 0x0c00, 0x00000000);
1453 nv_mthd(priv, 0xa097, 0x0c10, 0x00000000);
1454 nv_mthd(priv, 0xa097, 0x0c20, 0x00000000);
1455 nv_mthd(priv, 0xa097, 0x0c30, 0x00000000);
1456 nv_mthd(priv, 0xa097, 0x0c40, 0x00000000);
1457 nv_mthd(priv, 0xa097, 0x0c50, 0x00000000);
1458 nv_mthd(priv, 0xa097, 0x0c60, 0x00000000);
1459 nv_mthd(priv, 0xa097, 0x0c70, 0x00000000);
1460 nv_mthd(priv, 0xa097, 0x0c80, 0x00000000);
1461 nv_mthd(priv, 0xa097, 0x0c90, 0x00000000);
1462 nv_mthd(priv, 0xa097, 0x0ca0, 0x00000000);
1463 nv_mthd(priv, 0xa097, 0x0cb0, 0x00000000);
1464 nv_mthd(priv, 0xa097, 0x0cc0, 0x00000000);
1465 nv_mthd(priv, 0xa097, 0x0cd0, 0x00000000);
1466 nv_mthd(priv, 0xa097, 0x0ce0, 0x00000000);
1467 nv_mthd(priv, 0xa097, 0x0cf0, 0x00000000);
1468 nv_mthd(priv, 0xa097, 0x0c04, 0x00000000);
1469 nv_mthd(priv, 0xa097, 0x0c14, 0x00000000);
1470 nv_mthd(priv, 0xa097, 0x0c24, 0x00000000);
1471 nv_mthd(priv, 0xa097, 0x0c34, 0x00000000);
1472 nv_mthd(priv, 0xa097, 0x0c44, 0x00000000);
1473 nv_mthd(priv, 0xa097, 0x0c54, 0x00000000);
1474 nv_mthd(priv, 0xa097, 0x0c64, 0x00000000);
1475 nv_mthd(priv, 0xa097, 0x0c74, 0x00000000);
1476 nv_mthd(priv, 0xa097, 0x0c84, 0x00000000);
1477 nv_mthd(priv, 0xa097, 0x0c94, 0x00000000);
1478 nv_mthd(priv, 0xa097, 0x0ca4, 0x00000000);
1479 nv_mthd(priv, 0xa097, 0x0cb4, 0x00000000);
1480 nv_mthd(priv, 0xa097, 0x0cc4, 0x00000000);
1481 nv_mthd(priv, 0xa097, 0x0cd4, 0x00000000);
1482 nv_mthd(priv, 0xa097, 0x0ce4, 0x00000000);
1483 nv_mthd(priv, 0xa097, 0x0cf4, 0x00000000);
1484 nv_mthd(priv, 0xa097, 0x0c08, 0x00000000);
1485 nv_mthd(priv, 0xa097, 0x0c18, 0x00000000);
1486 nv_mthd(priv, 0xa097, 0x0c28, 0x00000000);
1487 nv_mthd(priv, 0xa097, 0x0c38, 0x00000000);
1488 nv_mthd(priv, 0xa097, 0x0c48, 0x00000000);
1489 nv_mthd(priv, 0xa097, 0x0c58, 0x00000000);
1490 nv_mthd(priv, 0xa097, 0x0c68, 0x00000000);
1491 nv_mthd(priv, 0xa097, 0x0c78, 0x00000000);
1492 nv_mthd(priv, 0xa097, 0x0c88, 0x00000000);
1493 nv_mthd(priv, 0xa097, 0x0c98, 0x00000000);
1494 nv_mthd(priv, 0xa097, 0x0ca8, 0x00000000);
1495 nv_mthd(priv, 0xa097, 0x0cb8, 0x00000000);
1496 nv_mthd(priv, 0xa097, 0x0cc8, 0x00000000);
1497 nv_mthd(priv, 0xa097, 0x0cd8, 0x00000000);
1498 nv_mthd(priv, 0xa097, 0x0ce8, 0x00000000);
1499 nv_mthd(priv, 0xa097, 0x0cf8, 0x00000000);
1500 nv_mthd(priv, 0xa097, 0x0c0c, 0x3f800000);
1501 nv_mthd(priv, 0xa097, 0x0c1c, 0x3f800000);
1502 nv_mthd(priv, 0xa097, 0x0c2c, 0x3f800000);
1503 nv_mthd(priv, 0xa097, 0x0c3c, 0x3f800000);
1504 nv_mthd(priv, 0xa097, 0x0c4c, 0x3f800000);
1505 nv_mthd(priv, 0xa097, 0x0c5c, 0x3f800000);
1506 nv_mthd(priv, 0xa097, 0x0c6c, 0x3f800000);
1507 nv_mthd(priv, 0xa097, 0x0c7c, 0x3f800000);
1508 nv_mthd(priv, 0xa097, 0x0c8c, 0x3f800000);
1509 nv_mthd(priv, 0xa097, 0x0c9c, 0x3f800000);
1510 nv_mthd(priv, 0xa097, 0x0cac, 0x3f800000);
1511 nv_mthd(priv, 0xa097, 0x0cbc, 0x3f800000);
1512 nv_mthd(priv, 0xa097, 0x0ccc, 0x3f800000);
1513 nv_mthd(priv, 0xa097, 0x0cdc, 0x3f800000);
1514 nv_mthd(priv, 0xa097, 0x0cec, 0x3f800000);
1515 nv_mthd(priv, 0xa097, 0x0cfc, 0x3f800000);
1516 nv_mthd(priv, 0xa097, 0x0d00, 0xffff0000);
1517 nv_mthd(priv, 0xa097, 0x0d08, 0xffff0000);
1518 nv_mthd(priv, 0xa097, 0x0d10, 0xffff0000);
1519 nv_mthd(priv, 0xa097, 0x0d18, 0xffff0000);
1520 nv_mthd(priv, 0xa097, 0x0d20, 0xffff0000);
1521 nv_mthd(priv, 0xa097, 0x0d28, 0xffff0000);
1522 nv_mthd(priv, 0xa097, 0x0d30, 0xffff0000);
1523 nv_mthd(priv, 0xa097, 0x0d38, 0xffff0000);
1524 nv_mthd(priv, 0xa097, 0x0d04, 0xffff0000);
1525 nv_mthd(priv, 0xa097, 0x0d0c, 0xffff0000);
1526 nv_mthd(priv, 0xa097, 0x0d14, 0xffff0000);
1527 nv_mthd(priv, 0xa097, 0x0d1c, 0xffff0000);
1528 nv_mthd(priv, 0xa097, 0x0d24, 0xffff0000);
1529 nv_mthd(priv, 0xa097, 0x0d2c, 0xffff0000);
1530 nv_mthd(priv, 0xa097, 0x0d34, 0xffff0000);
1531 nv_mthd(priv, 0xa097, 0x0d3c, 0xffff0000);
1532 nv_mthd(priv, 0xa097, 0x0e00, 0x00000000);
1533 nv_mthd(priv, 0xa097, 0x0e10, 0x00000000);
1534 nv_mthd(priv, 0xa097, 0x0e20, 0x00000000);
1535 nv_mthd(priv, 0xa097, 0x0e30, 0x00000000);
1536 nv_mthd(priv, 0xa097, 0x0e40, 0x00000000);
1537 nv_mthd(priv, 0xa097, 0x0e50, 0x00000000);
1538 nv_mthd(priv, 0xa097, 0x0e60, 0x00000000);
1539 nv_mthd(priv, 0xa097, 0x0e70, 0x00000000);
1540 nv_mthd(priv, 0xa097, 0x0e80, 0x00000000);
1541 nv_mthd(priv, 0xa097, 0x0e90, 0x00000000);
1542 nv_mthd(priv, 0xa097, 0x0ea0, 0x00000000);
1543 nv_mthd(priv, 0xa097, 0x0eb0, 0x00000000);
1544 nv_mthd(priv, 0xa097, 0x0ec0, 0x00000000);
1545 nv_mthd(priv, 0xa097, 0x0ed0, 0x00000000);
1546 nv_mthd(priv, 0xa097, 0x0ee0, 0x00000000);
1547 nv_mthd(priv, 0xa097, 0x0ef0, 0x00000000);
1548 nv_mthd(priv, 0xa097, 0x0e04, 0xffff0000);
1549 nv_mthd(priv, 0xa097, 0x0e14, 0xffff0000);
1550 nv_mthd(priv, 0xa097, 0x0e24, 0xffff0000);
1551 nv_mthd(priv, 0xa097, 0x0e34, 0xffff0000);
1552 nv_mthd(priv, 0xa097, 0x0e44, 0xffff0000);
1553 nv_mthd(priv, 0xa097, 0x0e54, 0xffff0000);
1554 nv_mthd(priv, 0xa097, 0x0e64, 0xffff0000);
1555 nv_mthd(priv, 0xa097, 0x0e74, 0xffff0000);
1556 nv_mthd(priv, 0xa097, 0x0e84, 0xffff0000);
1557 nv_mthd(priv, 0xa097, 0x0e94, 0xffff0000);
1558 nv_mthd(priv, 0xa097, 0x0ea4, 0xffff0000);
1559 nv_mthd(priv, 0xa097, 0x0eb4, 0xffff0000);
1560 nv_mthd(priv, 0xa097, 0x0ec4, 0xffff0000);
1561 nv_mthd(priv, 0xa097, 0x0ed4, 0xffff0000);
1562 nv_mthd(priv, 0xa097, 0x0ee4, 0xffff0000);
1563 nv_mthd(priv, 0xa097, 0x0ef4, 0xffff0000);
1564 nv_mthd(priv, 0xa097, 0x0e08, 0xffff0000);
1565 nv_mthd(priv, 0xa097, 0x0e18, 0xffff0000);
1566 nv_mthd(priv, 0xa097, 0x0e28, 0xffff0000);
1567 nv_mthd(priv, 0xa097, 0x0e38, 0xffff0000);
1568 nv_mthd(priv, 0xa097, 0x0e48, 0xffff0000);
1569 nv_mthd(priv, 0xa097, 0x0e58, 0xffff0000);
1570 nv_mthd(priv, 0xa097, 0x0e68, 0xffff0000);
1571 nv_mthd(priv, 0xa097, 0x0e78, 0xffff0000);
1572 nv_mthd(priv, 0xa097, 0x0e88, 0xffff0000);
1573 nv_mthd(priv, 0xa097, 0x0e98, 0xffff0000);
1574 nv_mthd(priv, 0xa097, 0x0ea8, 0xffff0000);
1575 nv_mthd(priv, 0xa097, 0x0eb8, 0xffff0000);
1576 nv_mthd(priv, 0xa097, 0x0ec8, 0xffff0000);
1577 nv_mthd(priv, 0xa097, 0x0ed8, 0xffff0000);
1578 nv_mthd(priv, 0xa097, 0x0ee8, 0xffff0000);
1579 nv_mthd(priv, 0xa097, 0x0ef8, 0xffff0000);
1580 nv_mthd(priv, 0xa097, 0x0d40, 0x00000000);
1581 nv_mthd(priv, 0xa097, 0x0d48, 0x00000000);
1582 nv_mthd(priv, 0xa097, 0x0d50, 0x00000000);
1583 nv_mthd(priv, 0xa097, 0x0d58, 0x00000000);
1584 nv_mthd(priv, 0xa097, 0x0d44, 0x00000000);
1585 nv_mthd(priv, 0xa097, 0x0d4c, 0x00000000);
1586 nv_mthd(priv, 0xa097, 0x0d54, 0x00000000);
1587 nv_mthd(priv, 0xa097, 0x0d5c, 0x00000000);
1588 nv_mthd(priv, 0xa097, 0x1e00, 0x00000001);
1589 nv_mthd(priv, 0xa097, 0x1e20, 0x00000001);
1590 nv_mthd(priv, 0xa097, 0x1e40, 0x00000001);
1591 nv_mthd(priv, 0xa097, 0x1e60, 0x00000001);
1592 nv_mthd(priv, 0xa097, 0x1e80, 0x00000001);
1593 nv_mthd(priv, 0xa097, 0x1ea0, 0x00000001);
1594 nv_mthd(priv, 0xa097, 0x1ec0, 0x00000001);
1595 nv_mthd(priv, 0xa097, 0x1ee0, 0x00000001);
1596 nv_mthd(priv, 0xa097, 0x1e04, 0x00000001);
1597 nv_mthd(priv, 0xa097, 0x1e24, 0x00000001);
1598 nv_mthd(priv, 0xa097, 0x1e44, 0x00000001);
1599 nv_mthd(priv, 0xa097, 0x1e64, 0x00000001);
1600 nv_mthd(priv, 0xa097, 0x1e84, 0x00000001);
1601 nv_mthd(priv, 0xa097, 0x1ea4, 0x00000001);
1602 nv_mthd(priv, 0xa097, 0x1ec4, 0x00000001);
1603 nv_mthd(priv, 0xa097, 0x1ee4, 0x00000001);
1604 nv_mthd(priv, 0xa097, 0x1e08, 0x00000002);
1605 nv_mthd(priv, 0xa097, 0x1e28, 0x00000002);
1606 nv_mthd(priv, 0xa097, 0x1e48, 0x00000002);
1607 nv_mthd(priv, 0xa097, 0x1e68, 0x00000002);
1608 nv_mthd(priv, 0xa097, 0x1e88, 0x00000002);
1609 nv_mthd(priv, 0xa097, 0x1ea8, 0x00000002);
1610 nv_mthd(priv, 0xa097, 0x1ec8, 0x00000002);
1611 nv_mthd(priv, 0xa097, 0x1ee8, 0x00000002);
1612 nv_mthd(priv, 0xa097, 0x1e0c, 0x00000001);
1613 nv_mthd(priv, 0xa097, 0x1e2c, 0x00000001);
1614 nv_mthd(priv, 0xa097, 0x1e4c, 0x00000001);
1615 nv_mthd(priv, 0xa097, 0x1e6c, 0x00000001);
1616 nv_mthd(priv, 0xa097, 0x1e8c, 0x00000001);
1617 nv_mthd(priv, 0xa097, 0x1eac, 0x00000001);
1618 nv_mthd(priv, 0xa097, 0x1ecc, 0x00000001);
1619 nv_mthd(priv, 0xa097, 0x1eec, 0x00000001);
1620 nv_mthd(priv, 0xa097, 0x1e10, 0x00000001);
1621 nv_mthd(priv, 0xa097, 0x1e30, 0x00000001);
1622 nv_mthd(priv, 0xa097, 0x1e50, 0x00000001);
1623 nv_mthd(priv, 0xa097, 0x1e70, 0x00000001);
1624 nv_mthd(priv, 0xa097, 0x1e90, 0x00000001);
1625 nv_mthd(priv, 0xa097, 0x1eb0, 0x00000001);
1626 nv_mthd(priv, 0xa097, 0x1ed0, 0x00000001);
1627 nv_mthd(priv, 0xa097, 0x1ef0, 0x00000001);
1628 nv_mthd(priv, 0xa097, 0x1e14, 0x00000002);
1629 nv_mthd(priv, 0xa097, 0x1e34, 0x00000002);
1630 nv_mthd(priv, 0xa097, 0x1e54, 0x00000002);
1631 nv_mthd(priv, 0xa097, 0x1e74, 0x00000002);
1632 nv_mthd(priv, 0xa097, 0x1e94, 0x00000002);
1633 nv_mthd(priv, 0xa097, 0x1eb4, 0x00000002);
1634 nv_mthd(priv, 0xa097, 0x1ed4, 0x00000002);
1635 nv_mthd(priv, 0xa097, 0x1ef4, 0x00000002);
1636 nv_mthd(priv, 0xa097, 0x1e18, 0x00000001);
1637 nv_mthd(priv, 0xa097, 0x1e38, 0x00000001);
1638 nv_mthd(priv, 0xa097, 0x1e58, 0x00000001);
1639 nv_mthd(priv, 0xa097, 0x1e78, 0x00000001);
1640 nv_mthd(priv, 0xa097, 0x1e98, 0x00000001);
1641 nv_mthd(priv, 0xa097, 0x1eb8, 0x00000001);
1642 nv_mthd(priv, 0xa097, 0x1ed8, 0x00000001);
1643 nv_mthd(priv, 0xa097, 0x1ef8, 0x00000001);
1644 nv_mthd(priv, 0xa097, 0x3400, 0x00000000);
1645 nv_mthd(priv, 0xa097, 0x3404, 0x00000000);
1646 nv_mthd(priv, 0xa097, 0x3408, 0x00000000);
1647 nv_mthd(priv, 0xa097, 0x340c, 0x00000000);
1648 nv_mthd(priv, 0xa097, 0x3410, 0x00000000);
1649 nv_mthd(priv, 0xa097, 0x3414, 0x00000000);
1650 nv_mthd(priv, 0xa097, 0x3418, 0x00000000);
1651 nv_mthd(priv, 0xa097, 0x341c, 0x00000000);
1652 nv_mthd(priv, 0xa097, 0x3420, 0x00000000);
1653 nv_mthd(priv, 0xa097, 0x3424, 0x00000000);
1654 nv_mthd(priv, 0xa097, 0x3428, 0x00000000);
1655 nv_mthd(priv, 0xa097, 0x342c, 0x00000000);
1656 nv_mthd(priv, 0xa097, 0x3430, 0x00000000);
1657 nv_mthd(priv, 0xa097, 0x3434, 0x00000000);
1658 nv_mthd(priv, 0xa097, 0x3438, 0x00000000);
1659 nv_mthd(priv, 0xa097, 0x343c, 0x00000000);
1660 nv_mthd(priv, 0xa097, 0x3440, 0x00000000);
1661 nv_mthd(priv, 0xa097, 0x3444, 0x00000000);
1662 nv_mthd(priv, 0xa097, 0x3448, 0x00000000);
1663 nv_mthd(priv, 0xa097, 0x344c, 0x00000000);
1664 nv_mthd(priv, 0xa097, 0x3450, 0x00000000);
1665 nv_mthd(priv, 0xa097, 0x3454, 0x00000000);
1666 nv_mthd(priv, 0xa097, 0x3458, 0x00000000);
1667 nv_mthd(priv, 0xa097, 0x345c, 0x00000000);
1668 nv_mthd(priv, 0xa097, 0x3460, 0x00000000);
1669 nv_mthd(priv, 0xa097, 0x3464, 0x00000000);
1670 nv_mthd(priv, 0xa097, 0x3468, 0x00000000);
1671 nv_mthd(priv, 0xa097, 0x346c, 0x00000000);
1672 nv_mthd(priv, 0xa097, 0x3470, 0x00000000);
1673 nv_mthd(priv, 0xa097, 0x3474, 0x00000000);
1674 nv_mthd(priv, 0xa097, 0x3478, 0x00000000);
1675 nv_mthd(priv, 0xa097, 0x347c, 0x00000000);
1676 nv_mthd(priv, 0xa097, 0x3480, 0x00000000);
1677 nv_mthd(priv, 0xa097, 0x3484, 0x00000000);
1678 nv_mthd(priv, 0xa097, 0x3488, 0x00000000);
1679 nv_mthd(priv, 0xa097, 0x348c, 0x00000000);
1680 nv_mthd(priv, 0xa097, 0x3490, 0x00000000);
1681 nv_mthd(priv, 0xa097, 0x3494, 0x00000000);
1682 nv_mthd(priv, 0xa097, 0x3498, 0x00000000);
1683 nv_mthd(priv, 0xa097, 0x349c, 0x00000000);
1684 nv_mthd(priv, 0xa097, 0x34a0, 0x00000000);
1685 nv_mthd(priv, 0xa097, 0x34a4, 0x00000000);
1686 nv_mthd(priv, 0xa097, 0x34a8, 0x00000000);
1687 nv_mthd(priv, 0xa097, 0x34ac, 0x00000000);
1688 nv_mthd(priv, 0xa097, 0x34b0, 0x00000000);
1689 nv_mthd(priv, 0xa097, 0x34b4, 0x00000000);
1690 nv_mthd(priv, 0xa097, 0x34b8, 0x00000000);
1691 nv_mthd(priv, 0xa097, 0x34bc, 0x00000000);
1692 nv_mthd(priv, 0xa097, 0x34c0, 0x00000000);
1693 nv_mthd(priv, 0xa097, 0x34c4, 0x00000000);
1694 nv_mthd(priv, 0xa097, 0x34c8, 0x00000000);
1695 nv_mthd(priv, 0xa097, 0x34cc, 0x00000000);
1696 nv_mthd(priv, 0xa097, 0x34d0, 0x00000000);
1697 nv_mthd(priv, 0xa097, 0x34d4, 0x00000000);
1698 nv_mthd(priv, 0xa097, 0x34d8, 0x00000000);
1699 nv_mthd(priv, 0xa097, 0x34dc, 0x00000000);
1700 nv_mthd(priv, 0xa097, 0x34e0, 0x00000000);
1701 nv_mthd(priv, 0xa097, 0x34e4, 0x00000000);
1702 nv_mthd(priv, 0xa097, 0x34e8, 0x00000000);
1703 nv_mthd(priv, 0xa097, 0x34ec, 0x00000000);
1704 nv_mthd(priv, 0xa097, 0x34f0, 0x00000000);
1705 nv_mthd(priv, 0xa097, 0x34f4, 0x00000000);
1706 nv_mthd(priv, 0xa097, 0x34f8, 0x00000000);
1707 nv_mthd(priv, 0xa097, 0x34fc, 0x00000000);
1708 nv_mthd(priv, 0xa097, 0x3500, 0x00000000);
1709 nv_mthd(priv, 0xa097, 0x3504, 0x00000000);
1710 nv_mthd(priv, 0xa097, 0x3508, 0x00000000);
1711 nv_mthd(priv, 0xa097, 0x350c, 0x00000000);
1712 nv_mthd(priv, 0xa097, 0x3510, 0x00000000);
1713 nv_mthd(priv, 0xa097, 0x3514, 0x00000000);
1714 nv_mthd(priv, 0xa097, 0x3518, 0x00000000);
1715 nv_mthd(priv, 0xa097, 0x351c, 0x00000000);
1716 nv_mthd(priv, 0xa097, 0x3520, 0x00000000);
1717 nv_mthd(priv, 0xa097, 0x3524, 0x00000000);
1718 nv_mthd(priv, 0xa097, 0x3528, 0x00000000);
1719 nv_mthd(priv, 0xa097, 0x352c, 0x00000000);
1720 nv_mthd(priv, 0xa097, 0x3530, 0x00000000);
1721 nv_mthd(priv, 0xa097, 0x3534, 0x00000000);
1722 nv_mthd(priv, 0xa097, 0x3538, 0x00000000);
1723 nv_mthd(priv, 0xa097, 0x353c, 0x00000000);
1724 nv_mthd(priv, 0xa097, 0x3540, 0x00000000);
1725 nv_mthd(priv, 0xa097, 0x3544, 0x00000000);
1726 nv_mthd(priv, 0xa097, 0x3548, 0x00000000);
1727 nv_mthd(priv, 0xa097, 0x354c, 0x00000000);
1728 nv_mthd(priv, 0xa097, 0x3550, 0x00000000);
1729 nv_mthd(priv, 0xa097, 0x3554, 0x00000000);
1730 nv_mthd(priv, 0xa097, 0x3558, 0x00000000);
1731 nv_mthd(priv, 0xa097, 0x355c, 0x00000000);
1732 nv_mthd(priv, 0xa097, 0x3560, 0x00000000);
1733 nv_mthd(priv, 0xa097, 0x3564, 0x00000000);
1734 nv_mthd(priv, 0xa097, 0x3568, 0x00000000);
1735 nv_mthd(priv, 0xa097, 0x356c, 0x00000000);
1736 nv_mthd(priv, 0xa097, 0x3570, 0x00000000);
1737 nv_mthd(priv, 0xa097, 0x3574, 0x00000000);
1738 nv_mthd(priv, 0xa097, 0x3578, 0x00000000);
1739 nv_mthd(priv, 0xa097, 0x357c, 0x00000000);
1740 nv_mthd(priv, 0xa097, 0x3580, 0x00000000);
1741 nv_mthd(priv, 0xa097, 0x3584, 0x00000000);
1742 nv_mthd(priv, 0xa097, 0x3588, 0x00000000);
1743 nv_mthd(priv, 0xa097, 0x358c, 0x00000000);
1744 nv_mthd(priv, 0xa097, 0x3590, 0x00000000);
1745 nv_mthd(priv, 0xa097, 0x3594, 0x00000000);
1746 nv_mthd(priv, 0xa097, 0x3598, 0x00000000);
1747 nv_mthd(priv, 0xa097, 0x359c, 0x00000000);
1748 nv_mthd(priv, 0xa097, 0x35a0, 0x00000000);
1749 nv_mthd(priv, 0xa097, 0x35a4, 0x00000000);
1750 nv_mthd(priv, 0xa097, 0x35a8, 0x00000000);
1751 nv_mthd(priv, 0xa097, 0x35ac, 0x00000000);
1752 nv_mthd(priv, 0xa097, 0x35b0, 0x00000000);
1753 nv_mthd(priv, 0xa097, 0x35b4, 0x00000000);
1754 nv_mthd(priv, 0xa097, 0x35b8, 0x00000000);
1755 nv_mthd(priv, 0xa097, 0x35bc, 0x00000000);
1756 nv_mthd(priv, 0xa097, 0x35c0, 0x00000000);
1757 nv_mthd(priv, 0xa097, 0x35c4, 0x00000000);
1758 nv_mthd(priv, 0xa097, 0x35c8, 0x00000000);
1759 nv_mthd(priv, 0xa097, 0x35cc, 0x00000000);
1760 nv_mthd(priv, 0xa097, 0x35d0, 0x00000000);
1761 nv_mthd(priv, 0xa097, 0x35d4, 0x00000000);
1762 nv_mthd(priv, 0xa097, 0x35d8, 0x00000000);
1763 nv_mthd(priv, 0xa097, 0x35dc, 0x00000000);
1764 nv_mthd(priv, 0xa097, 0x35e0, 0x00000000);
1765 nv_mthd(priv, 0xa097, 0x35e4, 0x00000000);
1766 nv_mthd(priv, 0xa097, 0x35e8, 0x00000000);
1767 nv_mthd(priv, 0xa097, 0x35ec, 0x00000000);
1768 nv_mthd(priv, 0xa097, 0x35f0, 0x00000000);
1769 nv_mthd(priv, 0xa097, 0x35f4, 0x00000000);
1770 nv_mthd(priv, 0xa097, 0x35f8, 0x00000000);
1771 nv_mthd(priv, 0xa097, 0x35fc, 0x00000000);
1772 nv_mthd(priv, 0xa097, 0x030c, 0x00000001);
1773 nv_mthd(priv, 0xa097, 0x1944, 0x00000000);
1774 nv_mthd(priv, 0xa097, 0x1514, 0x00000000);
1775 nv_mthd(priv, 0xa097, 0x0d68, 0x0000ffff);
1776 nv_mthd(priv, 0xa097, 0x121c, 0x0fac6881);
1777 nv_mthd(priv, 0xa097, 0x0fac, 0x00000001);
1778 nv_mthd(priv, 0xa097, 0x1538, 0x00000001);
1779 nv_mthd(priv, 0xa097, 0x0fe0, 0x00000000);
1780 nv_mthd(priv, 0xa097, 0x0fe4, 0x00000000);
1781 nv_mthd(priv, 0xa097, 0x0fe8, 0x00000014);
1782 nv_mthd(priv, 0xa097, 0x0fec, 0x00000040);
1783 nv_mthd(priv, 0xa097, 0x0ff0, 0x00000000);
1784 nv_mthd(priv, 0xa097, 0x179c, 0x00000000);
1785 nv_mthd(priv, 0xa097, 0x1228, 0x00000400);
1786 nv_mthd(priv, 0xa097, 0x122c, 0x00000300);
1787 nv_mthd(priv, 0xa097, 0x1230, 0x00010001);
1788 nv_mthd(priv, 0xa097, 0x07f8, 0x00000000);
1789 nv_mthd(priv, 0xa097, 0x15b4, 0x00000001);
1790 nv_mthd(priv, 0xa097, 0x15cc, 0x00000000);
1791 nv_mthd(priv, 0xa097, 0x1534, 0x00000000);
1792 nv_mthd(priv, 0xa097, 0x0fb0, 0x00000000);
1793 nv_mthd(priv, 0xa097, 0x15d0, 0x00000000);
1794 nv_mthd(priv, 0xa097, 0x153c, 0x00000000);
1795 nv_mthd(priv, 0xa097, 0x16b4, 0x00000003);
1796 nv_mthd(priv, 0xa097, 0x0fbc, 0x0000ffff);
1797 nv_mthd(priv, 0xa097, 0x0fc0, 0x0000ffff);
1798 nv_mthd(priv, 0xa097, 0x0fc4, 0x0000ffff);
1799 nv_mthd(priv, 0xa097, 0x0fc8, 0x0000ffff);
1800 nv_mthd(priv, 0xa097, 0x0df8, 0x00000000);
1801 nv_mthd(priv, 0xa097, 0x0dfc, 0x00000000);
1802 nv_mthd(priv, 0xa097, 0x1948, 0x00000000);
1803 nv_mthd(priv, 0xa097, 0x1970, 0x00000001);
1804 nv_mthd(priv, 0xa097, 0x161c, 0x000009f0);
1805 nv_mthd(priv, 0xa097, 0x0dcc, 0x00000010);
1806 nv_mthd(priv, 0xa097, 0x163c, 0x00000000);
1807 nv_mthd(priv, 0xa097, 0x15e4, 0x00000000);
1808 nv_mthd(priv, 0xa097, 0x1160, 0x25e00040);
1809 nv_mthd(priv, 0xa097, 0x1164, 0x25e00040);
1810 nv_mthd(priv, 0xa097, 0x1168, 0x25e00040);
1811 nv_mthd(priv, 0xa097, 0x116c, 0x25e00040);
1812 nv_mthd(priv, 0xa097, 0x1170, 0x25e00040);
1813 nv_mthd(priv, 0xa097, 0x1174, 0x25e00040);
1814 nv_mthd(priv, 0xa097, 0x1178, 0x25e00040);
1815 nv_mthd(priv, 0xa097, 0x117c, 0x25e00040);
1816 nv_mthd(priv, 0xa097, 0x1180, 0x25e00040);
1817 nv_mthd(priv, 0xa097, 0x1184, 0x25e00040);
1818 nv_mthd(priv, 0xa097, 0x1188, 0x25e00040);
1819 nv_mthd(priv, 0xa097, 0x118c, 0x25e00040);
1820 nv_mthd(priv, 0xa097, 0x1190, 0x25e00040);
1821 nv_mthd(priv, 0xa097, 0x1194, 0x25e00040);
1822 nv_mthd(priv, 0xa097, 0x1198, 0x25e00040);
1823 nv_mthd(priv, 0xa097, 0x119c, 0x25e00040);
1824 nv_mthd(priv, 0xa097, 0x11a0, 0x25e00040);
1825 nv_mthd(priv, 0xa097, 0x11a4, 0x25e00040);
1826 nv_mthd(priv, 0xa097, 0x11a8, 0x25e00040);
1827 nv_mthd(priv, 0xa097, 0x11ac, 0x25e00040);
1828 nv_mthd(priv, 0xa097, 0x11b0, 0x25e00040);
1829 nv_mthd(priv, 0xa097, 0x11b4, 0x25e00040);
1830 nv_mthd(priv, 0xa097, 0x11b8, 0x25e00040);
1831 nv_mthd(priv, 0xa097, 0x11bc, 0x25e00040);
1832 nv_mthd(priv, 0xa097, 0x11c0, 0x25e00040);
1833 nv_mthd(priv, 0xa097, 0x11c4, 0x25e00040);
1834 nv_mthd(priv, 0xa097, 0x11c8, 0x25e00040);
1835 nv_mthd(priv, 0xa097, 0x11cc, 0x25e00040);
1836 nv_mthd(priv, 0xa097, 0x11d0, 0x25e00040);
1837 nv_mthd(priv, 0xa097, 0x11d4, 0x25e00040);
1838 nv_mthd(priv, 0xa097, 0x11d8, 0x25e00040);
1839 nv_mthd(priv, 0xa097, 0x11dc, 0x25e00040);
1840 nv_mthd(priv, 0xa097, 0x1880, 0x00000000);
1841 nv_mthd(priv, 0xa097, 0x1884, 0x00000000);
1842 nv_mthd(priv, 0xa097, 0x1888, 0x00000000);
1843 nv_mthd(priv, 0xa097, 0x188c, 0x00000000);
1844 nv_mthd(priv, 0xa097, 0x1890, 0x00000000);
1845 nv_mthd(priv, 0xa097, 0x1894, 0x00000000);
1846 nv_mthd(priv, 0xa097, 0x1898, 0x00000000);
1847 nv_mthd(priv, 0xa097, 0x189c, 0x00000000);
1848 nv_mthd(priv, 0xa097, 0x18a0, 0x00000000);
1849 nv_mthd(priv, 0xa097, 0x18a4, 0x00000000);
1850 nv_mthd(priv, 0xa097, 0x18a8, 0x00000000);
1851 nv_mthd(priv, 0xa097, 0x18ac, 0x00000000);
1852 nv_mthd(priv, 0xa097, 0x18b0, 0x00000000);
1853 nv_mthd(priv, 0xa097, 0x18b4, 0x00000000);
1854 nv_mthd(priv, 0xa097, 0x18b8, 0x00000000);
1855 nv_mthd(priv, 0xa097, 0x18bc, 0x00000000);
1856 nv_mthd(priv, 0xa097, 0x18c0, 0x00000000);
1857 nv_mthd(priv, 0xa097, 0x18c4, 0x00000000);
1858 nv_mthd(priv, 0xa097, 0x18c8, 0x00000000);
1859 nv_mthd(priv, 0xa097, 0x18cc, 0x00000000);
1860 nv_mthd(priv, 0xa097, 0x18d0, 0x00000000);
1861 nv_mthd(priv, 0xa097, 0x18d4, 0x00000000);
1862 nv_mthd(priv, 0xa097, 0x18d8, 0x00000000);
1863 nv_mthd(priv, 0xa097, 0x18dc, 0x00000000);
1864 nv_mthd(priv, 0xa097, 0x18e0, 0x00000000);
1865 nv_mthd(priv, 0xa097, 0x18e4, 0x00000000);
1866 nv_mthd(priv, 0xa097, 0x18e8, 0x00000000);
1867 nv_mthd(priv, 0xa097, 0x18ec, 0x00000000);
1868 nv_mthd(priv, 0xa097, 0x18f0, 0x00000000);
1869 nv_mthd(priv, 0xa097, 0x18f4, 0x00000000);
1870 nv_mthd(priv, 0xa097, 0x18f8, 0x00000000);
1871 nv_mthd(priv, 0xa097, 0x18fc, 0x00000000);
1872 nv_mthd(priv, 0xa097, 0x0f84, 0x00000000);
1873 nv_mthd(priv, 0xa097, 0x0f88, 0x00000000);
1874 nv_mthd(priv, 0xa097, 0x17c8, 0x00000000);
1875 nv_mthd(priv, 0xa097, 0x17cc, 0x00000000);
1876 nv_mthd(priv, 0xa097, 0x17d0, 0x000000ff);
1877 nv_mthd(priv, 0xa097, 0x17d4, 0xffffffff);
1878 nv_mthd(priv, 0xa097, 0x17d8, 0x00000002);
1879 nv_mthd(priv, 0xa097, 0x17dc, 0x00000000);
1880 nv_mthd(priv, 0xa097, 0x15f4, 0x00000000);
1881 nv_mthd(priv, 0xa097, 0x15f8, 0x00000000);
1882 nv_mthd(priv, 0xa097, 0x1434, 0x00000000);
1883 nv_mthd(priv, 0xa097, 0x1438, 0x00000000);
1884 nv_mthd(priv, 0xa097, 0x0d74, 0x00000000);
1885 nv_mthd(priv, 0xa097, 0x0dec, 0x00000001);
1886 nv_mthd(priv, 0xa097, 0x13a4, 0x00000000);
1887 nv_mthd(priv, 0xa097, 0x1318, 0x00000001);
1888 nv_mthd(priv, 0xa097, 0x1644, 0x00000000);
1889 nv_mthd(priv, 0xa097, 0x0748, 0x00000000);
1890 nv_mthd(priv, 0xa097, 0x0de8, 0x00000000);
1891 nv_mthd(priv, 0xa097, 0x1648, 0x00000000);
1892 nv_mthd(priv, 0xa097, 0x12a4, 0x00000000);
1893 nv_mthd(priv, 0xa097, 0x1120, 0x00000000);
1894 nv_mthd(priv, 0xa097, 0x1124, 0x00000000);
1895 nv_mthd(priv, 0xa097, 0x1128, 0x00000000);
1896 nv_mthd(priv, 0xa097, 0x112c, 0x00000000);
1897 nv_mthd(priv, 0xa097, 0x1118, 0x00000000);
1898 nv_mthd(priv, 0xa097, 0x164c, 0x00000000);
1899 nv_mthd(priv, 0xa097, 0x1658, 0x00000000);
1900 nv_mthd(priv, 0xa097, 0x1910, 0x00000290);
1901 nv_mthd(priv, 0xa097, 0x1518, 0x00000000);
1902 nv_mthd(priv, 0xa097, 0x165c, 0x00000001);
1903 nv_mthd(priv, 0xa097, 0x1520, 0x00000000);
1904 nv_mthd(priv, 0xa097, 0x1604, 0x00000000);
1905 nv_mthd(priv, 0xa097, 0x1570, 0x00000000);
1906 nv_mthd(priv, 0xa097, 0x13b0, 0x3f800000);
1907 nv_mthd(priv, 0xa097, 0x13b4, 0x3f800000);
1908 nv_mthd(priv, 0xa097, 0x020c, 0x00000000);
1909 nv_mthd(priv, 0xa097, 0x1670, 0x30201000);
1910 nv_mthd(priv, 0xa097, 0x1674, 0x70605040);
1911 nv_mthd(priv, 0xa097, 0x1678, 0xb8a89888);
1912 nv_mthd(priv, 0xa097, 0x167c, 0xf8e8d8c8);
1913 nv_mthd(priv, 0xa097, 0x166c, 0x00000000);
1914 nv_mthd(priv, 0xa097, 0x1680, 0x00ffff00);
1915 nv_mthd(priv, 0xa097, 0x12d0, 0x00000003);
1916 nv_mthd(priv, 0xa097, 0x12d4, 0x00000002);
1917 nv_mthd(priv, 0xa097, 0x1684, 0x00000000);
1918 nv_mthd(priv, 0xa097, 0x1688, 0x00000000);
1919 nv_mthd(priv, 0xa097, 0x0dac, 0x00001b02);
1920 nv_mthd(priv, 0xa097, 0x0db0, 0x00001b02);
1921 nv_mthd(priv, 0xa097, 0x0db4, 0x00000000);
1922 nv_mthd(priv, 0xa097, 0x168c, 0x00000000);
1923 nv_mthd(priv, 0xa097, 0x15bc, 0x00000000);
1924 nv_mthd(priv, 0xa097, 0x156c, 0x00000000);
1925 nv_mthd(priv, 0xa097, 0x187c, 0x00000000);
1926 nv_mthd(priv, 0xa097, 0x1110, 0x00000001);
1927 nv_mthd(priv, 0xa097, 0x0dc0, 0x00000000);
1928 nv_mthd(priv, 0xa097, 0x0dc4, 0x00000000);
1929 nv_mthd(priv, 0xa097, 0x0dc8, 0x00000000);
1930 nv_mthd(priv, 0xa097, 0x1234, 0x00000000);
1931 nv_mthd(priv, 0xa097, 0x1690, 0x00000000);
1932 nv_mthd(priv, 0xa097, 0x12ac, 0x00000001);
1933 nv_mthd(priv, 0xa097, 0x0790, 0x00000000);
1934 nv_mthd(priv, 0xa097, 0x0794, 0x00000000);
1935 nv_mthd(priv, 0xa097, 0x0798, 0x00000000);
1936 nv_mthd(priv, 0xa097, 0x079c, 0x00000000);
1937 nv_mthd(priv, 0xa097, 0x07a0, 0x00000000);
1938 nv_mthd(priv, 0xa097, 0x077c, 0x00000000);
1939 nv_mthd(priv, 0xa097, 0x1000, 0x00000010);
1940 nv_mthd(priv, 0xa097, 0x10fc, 0x00000000);
1941 nv_mthd(priv, 0xa097, 0x1290, 0x00000000);
1942 nv_mthd(priv, 0xa097, 0x0218, 0x00000010);
1943 nv_mthd(priv, 0xa097, 0x12d8, 0x00000000);
1944 nv_mthd(priv, 0xa097, 0x12dc, 0x00000010);
1945 nv_mthd(priv, 0xa097, 0x0d94, 0x00000001);
1946 nv_mthd(priv, 0xa097, 0x155c, 0x00000000);
1947 nv_mthd(priv, 0xa097, 0x1560, 0x00000000);
1948 nv_mthd(priv, 0xa097, 0x1564, 0x00000fff);
1949 nv_mthd(priv, 0xa097, 0x1574, 0x00000000);
1950 nv_mthd(priv, 0xa097, 0x1578, 0x00000000);
1951 nv_mthd(priv, 0xa097, 0x157c, 0x000fffff);
1952 nv_mthd(priv, 0xa097, 0x1354, 0x00000000);
1953 nv_mthd(priv, 0xa097, 0x1610, 0x00000012);
1954 nv_mthd(priv, 0xa097, 0x1608, 0x00000000);
1955 nv_mthd(priv, 0xa097, 0x160c, 0x00000000);
1956 nv_mthd(priv, 0xa097, 0x260c, 0x00000000);
1957 nv_mthd(priv, 0xa097, 0x07ac, 0x00000000);
1958 nv_mthd(priv, 0xa097, 0x162c, 0x00000003);
1959 nv_mthd(priv, 0xa097, 0x0210, 0x00000000);
1960 nv_mthd(priv, 0xa097, 0x0320, 0x00000000);
1961 nv_mthd(priv, 0xa097, 0x0324, 0x3f800000);
1962 nv_mthd(priv, 0xa097, 0x0328, 0x3f800000);
1963 nv_mthd(priv, 0xa097, 0x032c, 0x3f800000);
1964 nv_mthd(priv, 0xa097, 0x0330, 0x3f800000);
1965 nv_mthd(priv, 0xa097, 0x0334, 0x3f800000);
1966 nv_mthd(priv, 0xa097, 0x0338, 0x3f800000);
1967 nv_mthd(priv, 0xa097, 0x0750, 0x00000000);
1968 nv_mthd(priv, 0xa097, 0x0760, 0x39291909);
1969 nv_mthd(priv, 0xa097, 0x0764, 0x79695949);
1970 nv_mthd(priv, 0xa097, 0x0768, 0xb9a99989);
1971 nv_mthd(priv, 0xa097, 0x076c, 0xf9e9d9c9);
1972 nv_mthd(priv, 0xa097, 0x0770, 0x30201000);
1973 nv_mthd(priv, 0xa097, 0x0774, 0x70605040);
1974 nv_mthd(priv, 0xa097, 0x0778, 0x00009080);
1975 nv_mthd(priv, 0xa097, 0x0780, 0x39291909);
1976 nv_mthd(priv, 0xa097, 0x0784, 0x79695949);
1977 nv_mthd(priv, 0xa097, 0x0788, 0xb9a99989);
1978 nv_mthd(priv, 0xa097, 0x078c, 0xf9e9d9c9);
1979 nv_mthd(priv, 0xa097, 0x07d0, 0x30201000);
1980 nv_mthd(priv, 0xa097, 0x07d4, 0x70605040);
1981 nv_mthd(priv, 0xa097, 0x07d8, 0x00009080);
1982 nv_mthd(priv, 0xa097, 0x037c, 0x00000001);
1983 nv_mthd(priv, 0xa097, 0x0740, 0x00000000);
1984 nv_mthd(priv, 0xa097, 0x0744, 0x00000000);
1985 nv_mthd(priv, 0xa097, 0x2600, 0x00000000);
1986 nv_mthd(priv, 0xa097, 0x1918, 0x00000000);
1987 nv_mthd(priv, 0xa097, 0x191c, 0x00000900);
1988 nv_mthd(priv, 0xa097, 0x1920, 0x00000405);
1989 nv_mthd(priv, 0xa097, 0x1308, 0x00000001);
1990 nv_mthd(priv, 0xa097, 0x1924, 0x00000000);
1991 nv_mthd(priv, 0xa097, 0x13ac, 0x00000000);
1992 nv_mthd(priv, 0xa097, 0x192c, 0x00000001);
1993 nv_mthd(priv, 0xa097, 0x193c, 0x00002c1c);
1994 nv_mthd(priv, 0xa097, 0x0d7c, 0x00000000);
1995 nv_mthd(priv, 0xa097, 0x0f8c, 0x00000000);
1996 nv_mthd(priv, 0xa097, 0x02c0, 0x00000001);
1997 nv_mthd(priv, 0xa097, 0x1510, 0x00000000);
1998 nv_mthd(priv, 0xa097, 0x1940, 0x00000000);
1999 nv_mthd(priv, 0xa097, 0x0ff4, 0x00000000);
2000 nv_mthd(priv, 0xa097, 0x0ff8, 0x00000000);
2001 nv_mthd(priv, 0xa097, 0x194c, 0x00000000);
2002 nv_mthd(priv, 0xa097, 0x1950, 0x00000000);
2003 nv_mthd(priv, 0xa097, 0x1968, 0x00000000);
2004 nv_mthd(priv, 0xa097, 0x1590, 0x0000003f);
2005 nv_mthd(priv, 0xa097, 0x07e8, 0x00000000);
2006 nv_mthd(priv, 0xa097, 0x07ec, 0x00000000);
2007 nv_mthd(priv, 0xa097, 0x07f0, 0x00000000);
2008 nv_mthd(priv, 0xa097, 0x07f4, 0x00000000);
2009 nv_mthd(priv, 0xa097, 0x196c, 0x00000011);
2010 nv_mthd(priv, 0xa097, 0x02e4, 0x0000b001);
2011 nv_mthd(priv, 0xa097, 0x036c, 0x00000000);
2012 nv_mthd(priv, 0xa097, 0x0370, 0x00000000);
2013 nv_mthd(priv, 0xa097, 0x197c, 0x00000000);
2014 nv_mthd(priv, 0xa097, 0x0fcc, 0x00000000);
2015 nv_mthd(priv, 0xa097, 0x0fd0, 0x00000000);
2016 nv_mthd(priv, 0xa097, 0x02d8, 0x00000040);
2017 nv_mthd(priv, 0xa097, 0x1980, 0x00000080);
2018 nv_mthd(priv, 0xa097, 0x1504, 0x00000080);
2019 nv_mthd(priv, 0xa097, 0x1984, 0x00000000);
2020 nv_mthd(priv, 0xa097, 0x0300, 0x00000001);
2021 nv_mthd(priv, 0xa097, 0x13a8, 0x00000000);
2022 nv_mthd(priv, 0xa097, 0x12ec, 0x00000000);
2023 nv_mthd(priv, 0xa097, 0x1310, 0x00000000);
2024 nv_mthd(priv, 0xa097, 0x1314, 0x00000001);
2025 nv_mthd(priv, 0xa097, 0x1380, 0x00000000);
2026 nv_mthd(priv, 0xa097, 0x1384, 0x00000001);
2027 nv_mthd(priv, 0xa097, 0x1388, 0x00000001);
2028 nv_mthd(priv, 0xa097, 0x138c, 0x00000001);
2029 nv_mthd(priv, 0xa097, 0x1390, 0x00000001);
2030 nv_mthd(priv, 0xa097, 0x1394, 0x00000000);
2031 nv_mthd(priv, 0xa097, 0x139c, 0x00000000);
2032 nv_mthd(priv, 0xa097, 0x1398, 0x00000000);
2033 nv_mthd(priv, 0xa097, 0x1594, 0x00000000);
2034 nv_mthd(priv, 0xa097, 0x1598, 0x00000001);
2035 nv_mthd(priv, 0xa097, 0x159c, 0x00000001);
2036 nv_mthd(priv, 0xa097, 0x15a0, 0x00000001);
2037 nv_mthd(priv, 0xa097, 0x15a4, 0x00000001);
2038 nv_mthd(priv, 0xa097, 0x0f54, 0x00000000);
2039 nv_mthd(priv, 0xa097, 0x0f58, 0x00000000);
2040 nv_mthd(priv, 0xa097, 0x0f5c, 0x00000000);
2041 nv_mthd(priv, 0xa097, 0x19bc, 0x00000000);
2042 nv_mthd(priv, 0xa097, 0x0f9c, 0x00000000);
2043 nv_mthd(priv, 0xa097, 0x0fa0, 0x00000000);
2044 nv_mthd(priv, 0xa097, 0x12cc, 0x00000000);
2045 nv_mthd(priv, 0xa097, 0x12e8, 0x00000000);
2046 nv_mthd(priv, 0xa097, 0x130c, 0x00000001);
2047 nv_mthd(priv, 0xa097, 0x1360, 0x00000000);
2048 nv_mthd(priv, 0xa097, 0x1364, 0x00000000);
2049 nv_mthd(priv, 0xa097, 0x1368, 0x00000000);
2050 nv_mthd(priv, 0xa097, 0x136c, 0x00000000);
2051 nv_mthd(priv, 0xa097, 0x1370, 0x00000000);
2052 nv_mthd(priv, 0xa097, 0x1374, 0x00000000);
2053 nv_mthd(priv, 0xa097, 0x1378, 0x00000000);
2054 nv_mthd(priv, 0xa097, 0x137c, 0x00000000);
2055 nv_mthd(priv, 0xa097, 0x133c, 0x00000001);
2056 nv_mthd(priv, 0xa097, 0x1340, 0x00000001);
2057 nv_mthd(priv, 0xa097, 0x1344, 0x00000002);
2058 nv_mthd(priv, 0xa097, 0x1348, 0x00000001);
2059 nv_mthd(priv, 0xa097, 0x134c, 0x00000001);
2060 nv_mthd(priv, 0xa097, 0x1350, 0x00000002);
2061 nv_mthd(priv, 0xa097, 0x1358, 0x00000001);
2062 nv_mthd(priv, 0xa097, 0x12e4, 0x00000000);
2063 nv_mthd(priv, 0xa097, 0x131c, 0x00000000);
2064 nv_mthd(priv, 0xa097, 0x1320, 0x00000000);
2065 nv_mthd(priv, 0xa097, 0x1324, 0x00000000);
2066 nv_mthd(priv, 0xa097, 0x1328, 0x00000000);
2067 nv_mthd(priv, 0xa097, 0x19c0, 0x00000000);
2068 nv_mthd(priv, 0xa097, 0x1140, 0x00000000);
2069 nv_mthd(priv, 0xa097, 0x19c4, 0x00000000);
2070 nv_mthd(priv, 0xa097, 0x19c8, 0x00001500);
2071 nv_mthd(priv, 0xa097, 0x135c, 0x00000000);
2072 nv_mthd(priv, 0xa097, 0x0f90, 0x00000000);
2073 nv_mthd(priv, 0xa097, 0x19e0, 0x00000001);
2074 nv_mthd(priv, 0xa097, 0x19e4, 0x00000001);
2075 nv_mthd(priv, 0xa097, 0x19e8, 0x00000001);
2076 nv_mthd(priv, 0xa097, 0x19ec, 0x00000001);
2077 nv_mthd(priv, 0xa097, 0x19f0, 0x00000001);
2078 nv_mthd(priv, 0xa097, 0x19f4, 0x00000001);
2079 nv_mthd(priv, 0xa097, 0x19f8, 0x00000001);
2080 nv_mthd(priv, 0xa097, 0x19fc, 0x00000001);
2081 nv_mthd(priv, 0xa097, 0x19cc, 0x00000001);
2082 nv_mthd(priv, 0xa097, 0x15b8, 0x00000000);
2083 nv_mthd(priv, 0xa097, 0x1a00, 0x00001111);
2084 nv_mthd(priv, 0xa097, 0x1a04, 0x00000000);
2085 nv_mthd(priv, 0xa097, 0x1a08, 0x00000000);
2086 nv_mthd(priv, 0xa097, 0x1a0c, 0x00000000);
2087 nv_mthd(priv, 0xa097, 0x1a10, 0x00000000);
2088 nv_mthd(priv, 0xa097, 0x1a14, 0x00000000);
2089 nv_mthd(priv, 0xa097, 0x1a18, 0x00000000);
2090 nv_mthd(priv, 0xa097, 0x1a1c, 0x00000000);
2091 nv_mthd(priv, 0xa097, 0x0d6c, 0xffff0000);
2092 nv_mthd(priv, 0xa097, 0x0d70, 0xffff0000);
2093 nv_mthd(priv, 0xa097, 0x10f8, 0x00001010);
2094 nv_mthd(priv, 0xa097, 0x0d80, 0x00000000);
2095 nv_mthd(priv, 0xa097, 0x0d84, 0x00000000);
2096 nv_mthd(priv, 0xa097, 0x0d88, 0x00000000);
2097 nv_mthd(priv, 0xa097, 0x0d8c, 0x00000000);
2098 nv_mthd(priv, 0xa097, 0x0d90, 0x00000000);
2099 nv_mthd(priv, 0xa097, 0x0da0, 0x00000000);
2100 nv_mthd(priv, 0xa097, 0x07a4, 0x00000000);
2101 nv_mthd(priv, 0xa097, 0x07a8, 0x00000000);
2102 nv_mthd(priv, 0xa097, 0x1508, 0x80000000);
2103 nv_mthd(priv, 0xa097, 0x150c, 0x40000000);
2104 nv_mthd(priv, 0xa097, 0x1668, 0x00000000);
2105 nv_mthd(priv, 0xa097, 0x0318, 0x00000008);
2106 nv_mthd(priv, 0xa097, 0x031c, 0x00000008);
2107 nv_mthd(priv, 0xa097, 0x0d9c, 0x00000001);
2108 nv_mthd(priv, 0xa097, 0x0374, 0x00000000);
2109 nv_mthd(priv, 0xa097, 0x0378, 0x00000020);
2110 nv_mthd(priv, 0xa097, 0x07dc, 0x00000000);
2111 nv_mthd(priv, 0xa097, 0x074c, 0x00000055);
2112 nv_mthd(priv, 0xa097, 0x1420, 0x00000003);
2113 nv_mthd(priv, 0xa097, 0x17bc, 0x00000000);
2114 nv_mthd(priv, 0xa097, 0x17c0, 0x00000000);
2115 nv_mthd(priv, 0xa097, 0x17c4, 0x00000001);
2116 nv_mthd(priv, 0xa097, 0x1008, 0x00000008);
2117 nv_mthd(priv, 0xa097, 0x100c, 0x00000040);
2118 nv_mthd(priv, 0xa097, 0x1010, 0x0000012c);
2119 nv_mthd(priv, 0xa097, 0x0d60, 0x00000040);
2120 nv_mthd(priv, 0xa097, 0x075c, 0x00000003);
2121 nv_mthd(priv, 0xa097, 0x1018, 0x00000020);
2122 nv_mthd(priv, 0xa097, 0x101c, 0x00000001);
2123 nv_mthd(priv, 0xa097, 0x1020, 0x00000020);
2124 nv_mthd(priv, 0xa097, 0x1024, 0x00000001);
2125 nv_mthd(priv, 0xa097, 0x1444, 0x00000000);
2126 nv_mthd(priv, 0xa097, 0x1448, 0x00000000);
2127 nv_mthd(priv, 0xa097, 0x144c, 0x00000000);
2128 nv_mthd(priv, 0xa097, 0x0360, 0x20164010);
2129 nv_mthd(priv, 0xa097, 0x0364, 0x00000020);
2130 nv_mthd(priv, 0xa097, 0x0368, 0x00000000);
2131 nv_mthd(priv, 0xa097, 0x0de4, 0x00000000);
2132 nv_mthd(priv, 0xa097, 0x0204, 0x00000006);
2133 nv_mthd(priv, 0xa097, 0x0208, 0x00000000);
2134 nv_mthd(priv, 0xa097, 0x02cc, 0x003fffff);
2135 nv_mthd(priv, 0xa097, 0x02d0, 0x003fffff);
2136 nv_mthd(priv, 0xa097, 0x1220, 0x00000005);
2137 nv_mthd(priv, 0xa097, 0x0fdc, 0x00000000);
2138 nv_mthd(priv, 0xa097, 0x0f98, 0x00400008);
2139 nv_mthd(priv, 0xa097, 0x1284, 0x08000080);
2140 nv_mthd(priv, 0xa097, 0x1450, 0x00400008);
2141 nv_mthd(priv, 0xa097, 0x1454, 0x08000080);
2142 nv_mthd(priv, 0xa097, 0x0214, 0x00000000);
2143}
2144
/* Program the initial (post-reset) method state for object class 0x902d
 * into the context image being generated (presumably the 2D engine class --
 * verify against the nouveau class list).  The (method, value) pairs are a
 * verbatim dump of hardware defaults: order and values are significant,
 * do not reorder or "clean up". */
2145 static void
2146 nve0_grctx_generate_902d(struct nvc0_graph_priv *priv)
2147 {
2148 nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
2149 nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
2150 nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
2151 nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
2152 nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
2153 nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
2154 nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
2155 nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
2156 nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
2157 nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
2158 nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
2159 nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
2160 nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
2161 nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
2162 nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
2163 nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
2164 nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
2165 nv_mthd(priv, 0x902d, 0x3410, 0x00000000);
2166}
2167
/* Write the golden-context defaults for the PGRAPH 0x4040xx..0x4042xx
 * register range (unit purpose not reverse-engineered yet, hence "unk").
 * Values are a verbatim hardware state dump captured from a running GPU;
 * keep the exact order and values. */
2168 static void
2169 nve0_graph_generate_unk40xx(struct nvc0_graph_priv *priv)
2170 {
2171 nv_wr32(priv, 0x404010, 0x0);
2172 nv_wr32(priv, 0x404014, 0x0);
2173 nv_wr32(priv, 0x404018, 0x0);
2174 nv_wr32(priv, 0x40401c, 0x0);
2175 nv_wr32(priv, 0x404020, 0x0);
2176 nv_wr32(priv, 0x404024, 0xe000);
2177 nv_wr32(priv, 0x404028, 0x0);
2178 nv_wr32(priv, 0x4040a8, 0x0);
2179 nv_wr32(priv, 0x4040ac, 0x0);
2180 nv_wr32(priv, 0x4040b0, 0x0);
2181 nv_wr32(priv, 0x4040b4, 0x0);
2182 nv_wr32(priv, 0x4040b8, 0x0);
2183 nv_wr32(priv, 0x4040bc, 0x0);
2184 nv_wr32(priv, 0x4040c0, 0x0);
2185 nv_wr32(priv, 0x4040c4, 0x0);
2186 nv_wr32(priv, 0x4040c8, 0xf800008f);
2187 nv_wr32(priv, 0x4040d0, 0x0);
2188 nv_wr32(priv, 0x4040d4, 0x0);
2189 nv_wr32(priv, 0x4040d8, 0x0);
2190 nv_wr32(priv, 0x4040dc, 0x0);
2191 nv_wr32(priv, 0x4040e0, 0x0);
2192 nv_wr32(priv, 0x4040e4, 0x0);
2193 nv_wr32(priv, 0x4040e8, 0x1000);
2194 nv_wr32(priv, 0x4040f8, 0x0);
2195 nv_wr32(priv, 0x404130, 0x0);
2196 nv_wr32(priv, 0x404134, 0x0);
2197 nv_wr32(priv, 0x404138, 0x20000040);
2198 nv_wr32(priv, 0x404150, 0x2e);
2199 nv_wr32(priv, 0x404154, 0x400);
2200 nv_wr32(priv, 0x404158, 0x200);
2201 nv_wr32(priv, 0x404164, 0x55);
2202 nv_wr32(priv, 0x4041a0, 0x0);
2203 nv_wr32(priv, 0x4041a4, 0x0);
2204 nv_wr32(priv, 0x4041a8, 0x0);
2205 nv_wr32(priv, 0x4041ac, 0x0);
2206 nv_wr32(priv, 0x404200, 0x0);
2207 nv_wr32(priv, 0x404204, 0x0);
2208 nv_wr32(priv, 0x404208, 0x0);
2209 nv_wr32(priv, 0x40420c, 0x0);
2210}
2211
/* Golden-context defaults for the PGRAPH 0x4044xx register range
 * (unidentified unit).  Verbatim hardware dump; do not reorder. */
2212 static void
2213 nve0_graph_generate_unk44xx(struct nvc0_graph_priv *priv)
2214 {
2215 nv_wr32(priv, 0x404404, 0x0);
2216 nv_wr32(priv, 0x404408, 0x0);
2217 nv_wr32(priv, 0x40440c, 0x0);
2218 nv_wr32(priv, 0x404410, 0x0);
2219 nv_wr32(priv, 0x404414, 0x0);
2220 nv_wr32(priv, 0x404418, 0x0);
2221 nv_wr32(priv, 0x40441c, 0x0);
2222 nv_wr32(priv, 0x404420, 0x0);
2223 nv_wr32(priv, 0x404424, 0x0);
2224 nv_wr32(priv, 0x404428, 0x0);
2225 nv_wr32(priv, 0x40442c, 0x0);
2226 nv_wr32(priv, 0x404430, 0x0);
2227 nv_wr32(priv, 0x404434, 0x0);
2228 nv_wr32(priv, 0x404438, 0x0);
2229 nv_wr32(priv, 0x404460, 0x0);
2230 nv_wr32(priv, 0x404464, 0x0);
2231 nv_wr32(priv, 0x404468, 0xffffff);
2232 nv_wr32(priv, 0x40446c, 0x0);
2233 nv_wr32(priv, 0x404480, 0x1);
2234 nv_wr32(priv, 0x404498, 0x1);
2235}
2236
/* Golden-context defaults for the PGRAPH 0x4046xx register range
 * (unidentified unit).  Verbatim hardware dump; do not reorder. */
2237 static void
2238 nve0_graph_generate_unk46xx(struct nvc0_graph_priv *priv)
2239 {
2240 nv_wr32(priv, 0x404604, 0x14);
2241 nv_wr32(priv, 0x404608, 0x0);
2242 nv_wr32(priv, 0x40460c, 0x3fff);
2243 nv_wr32(priv, 0x404610, 0x100);
2244 nv_wr32(priv, 0x404618, 0x0);
2245 nv_wr32(priv, 0x40461c, 0x0);
2246 nv_wr32(priv, 0x404620, 0x0);
2247 nv_wr32(priv, 0x404624, 0x0);
2248 nv_wr32(priv, 0x40462c, 0x0);
2249 nv_wr32(priv, 0x404630, 0x0);
2250 nv_wr32(priv, 0x404640, 0x0);
2251 nv_wr32(priv, 0x404654, 0x0);
2252 nv_wr32(priv, 0x404660, 0x0);
2253 nv_wr32(priv, 0x404678, 0x0);
2254 nv_wr32(priv, 0x40467c, 0x2);
2255 nv_wr32(priv, 0x404680, 0x0);
2256 nv_wr32(priv, 0x404684, 0x0);
2257 nv_wr32(priv, 0x404688, 0x0);
2258 nv_wr32(priv, 0x40468c, 0x0);
2259 nv_wr32(priv, 0x404690, 0x0);
2260 nv_wr32(priv, 0x404694, 0x0);
2261 nv_wr32(priv, 0x404698, 0x0);
2262 nv_wr32(priv, 0x40469c, 0x0);
2263 nv_wr32(priv, 0x4046a0, 0x7f0080);
2264 nv_wr32(priv, 0x4046a4, 0x0);
2265 nv_wr32(priv, 0x4046a8, 0x0);
2266 nv_wr32(priv, 0x4046ac, 0x0);
2267 nv_wr32(priv, 0x4046b0, 0x0);
2268 nv_wr32(priv, 0x4046b4, 0x0);
2269 nv_wr32(priv, 0x4046b8, 0x0);
2270 nv_wr32(priv, 0x4046bc, 0x0);
2271 nv_wr32(priv, 0x4046c0, 0x0);
2272 nv_wr32(priv, 0x4046c8, 0x0);
2273 nv_wr32(priv, 0x4046cc, 0x0);
2274 nv_wr32(priv, 0x4046d0, 0x0);
2275}
2276
/* Golden-context defaults for the PGRAPH 0x4047xx register range
 * (unidentified unit).  Verbatim hardware dump; do not reorder. */
2277 static void
2278 nve0_graph_generate_unk47xx(struct nvc0_graph_priv *priv)
2279 {
2280 nv_wr32(priv, 0x404700, 0x0);
2281 nv_wr32(priv, 0x404704, 0x0);
2282 nv_wr32(priv, 0x404708, 0x0);
2283 nv_wr32(priv, 0x404718, 0x0);
2284 nv_wr32(priv, 0x40471c, 0x0);
2285 nv_wr32(priv, 0x404720, 0x0);
2286 nv_wr32(priv, 0x404724, 0x0);
2287 nv_wr32(priv, 0x404728, 0x0);
2288 nv_wr32(priv, 0x40472c, 0x0);
2289 nv_wr32(priv, 0x404730, 0x0);
2290 nv_wr32(priv, 0x404734, 0x100);
2291 nv_wr32(priv, 0x404738, 0x0);
2292 nv_wr32(priv, 0x40473c, 0x0);
2293 nv_wr32(priv, 0x404744, 0x0);
2294 nv_wr32(priv, 0x404748, 0x0);
2295 nv_wr32(priv, 0x404754, 0x0);
2296}
2297
/* Golden-context defaults for the PGRAPH 0x4058xx..0x405bxx register
 * ranges (unidentified unit).  Note 0x405830/0x405800 are re-written later
 * by nve0_grctx_generate() with the same base values used by the per-GPC
 * magic calculation.  Verbatim hardware dump; do not reorder. */
2298 static void
2299 nve0_graph_generate_unk58xx(struct nvc0_graph_priv *priv)
2300 {
2301 nv_wr32(priv, 0x405800, 0xf8000bf);
2302 nv_wr32(priv, 0x405830, 0x2180648);
2303 nv_wr32(priv, 0x405834, 0x8000000);
2304 nv_wr32(priv, 0x405838, 0x0);
2305 nv_wr32(priv, 0x405854, 0x0);
2306 nv_wr32(priv, 0x405870, 0x1);
2307 nv_wr32(priv, 0x405874, 0x1);
2308 nv_wr32(priv, 0x405878, 0x1);
2309 nv_wr32(priv, 0x40587c, 0x1);
2310 nv_wr32(priv, 0x405a00, 0x0);
2311 nv_wr32(priv, 0x405a04, 0x0);
2312 nv_wr32(priv, 0x405a18, 0x0);
2313 nv_wr32(priv, 0x405b00, 0x0);
2314 nv_wr32(priv, 0x405b10, 0x1000);
2315}
2316
/* Golden-context defaults for the PGRAPH 0x4060xx register range
 * (unidentified unit).  0x406028 is overwritten later with the packed
 * per-GPC TPC counts by nve0_grctx_generate(). */
2317 static void
2318 nve0_graph_generate_unk60xx(struct nvc0_graph_priv *priv)
2319 {
2320 nv_wr32(priv, 0x406020, 0x4103c1);
2321 nv_wr32(priv, 0x406028, 0x1);
2322 nv_wr32(priv, 0x40602c, 0x1);
2323 nv_wr32(priv, 0x406030, 0x1);
2324 nv_wr32(priv, 0x406034, 0x1);
2325}
2326
/* Golden-context defaults for the PGRAPH 0x4064xx register range
 * (unidentified unit).  0x4064c4/0x4064c8/0x4064cc also appear in the
 * mmio_list relocations emitted by nve0_grctx_generate().  Verbatim
 * hardware dump; do not reorder. */
2327 static void
2328 nve0_graph_generate_unk64xx(struct nvc0_graph_priv *priv)
2329 {
2330 nv_wr32(priv, 0x4064a8, 0x0);
2331 nv_wr32(priv, 0x4064ac, 0x3fff);
2332 nv_wr32(priv, 0x4064b4, 0x0);
2333 nv_wr32(priv, 0x4064b8, 0x0);
2334 nv_wr32(priv, 0x4064c0, 0x801a00f0);
2335 nv_wr32(priv, 0x4064c4, 0x192ffff);
2336 nv_wr32(priv, 0x4064c8, 0x1800600);
2337 nv_wr32(priv, 0x4064cc, 0x0);
2338 nv_wr32(priv, 0x4064d0, 0x0);
2339 nv_wr32(priv, 0x4064d4, 0x0);
2340 nv_wr32(priv, 0x4064d8, 0x0);
2341 nv_wr32(priv, 0x4064dc, 0x0);
2342 nv_wr32(priv, 0x4064e0, 0x0);
2343 nv_wr32(priv, 0x4064e4, 0x0);
2344 nv_wr32(priv, 0x4064e8, 0x0);
2345 nv_wr32(priv, 0x4064ec, 0x0);
2346 nv_wr32(priv, 0x4064fc, 0x22a);
2347}
2348
/* Golden-context default for the single known register in the PGRAPH
 * 0x4070xx range (unidentified unit). */
2349 static void
2350 nve0_graph_generate_unk70xx(struct nvc0_graph_priv *priv)
2351 {
2352 nv_wr32(priv, 0x407040, 0x0);
2353}
2354
/* Golden-context defaults for the PGRAPH 0x4078xx register range
 * (unidentified unit).  The 0x40780c..0x407820 values mirror the pattern
 * written at 0x418b08.. and 0x41bf00.. in the GPC/TPC dumps; all three are
 * later overwritten with computed TPC-distribution data by
 * nve0_grctx_generate().  Verbatim hardware dump; do not reorder. */
2355 static void
2356 nve0_graph_generate_unk78xx(struct nvc0_graph_priv *priv)
2357 {
2358 nv_wr32(priv, 0x407804, 0x23);
2359 nv_wr32(priv, 0x40780c, 0xa418820);
2360 nv_wr32(priv, 0x407810, 0x62080e6);
2361 nv_wr32(priv, 0x407814, 0x20398a4);
2362 nv_wr32(priv, 0x407818, 0xe629062);
2363 nv_wr32(priv, 0x40781c, 0xa418820);
2364 nv_wr32(priv, 0x407820, 0xe6);
2365 nv_wr32(priv, 0x4078bc, 0x103);
2366}
2367
/* Golden-context defaults for the PGRAPH 0x4080xx register range
 * (unidentified unit).  Verbatim hardware dump; do not reorder. */
2369 static void
2370 nve0_graph_generate_unk80xx(struct nvc0_graph_priv *priv)
2371 {
2372 nv_wr32(priv, 0x408000, 0x0);
2373 nv_wr32(priv, 0x408004, 0x0);
2374 nv_wr32(priv, 0x408008, 0x30);
2375 nv_wr32(priv, 0x40800c, 0x0);
2376 nv_wr32(priv, 0x408010, 0x0);
2377 nv_wr32(priv, 0x408014, 0x69);
2378 nv_wr32(priv, 0x408018, 0xe100e100);
2379 nv_wr32(priv, 0x408064, 0x0);
2380}
2380
/* Golden-context defaults for the PGRAPH 0x4088xx/0x4089xx register ranges
 * (unidentified unit).  Verbatim hardware dump; do not reorder. */
2382 static void
2383 nve0_graph_generate_unk88xx(struct nvc0_graph_priv *priv)
2384 {
2385 nv_wr32(priv, 0x408800, 0x2802a3c);
2386 nv_wr32(priv, 0x408804, 0x40);
2387 nv_wr32(priv, 0x408808, 0x1043e005);
2388 nv_wr32(priv, 0x408840, 0xb);
2389 nv_wr32(priv, 0x408900, 0x3080b801);
2390 nv_wr32(priv, 0x408904, 0x62000001);
2391 nv_wr32(priv, 0x408908, 0xc8102f);
2392 nv_wr32(priv, 0x408980, 0x11d);
2393}
2393
/* Golden-context defaults for the GPC broadcast register space
 * (0x418xxx/0x419014 range).  Written through the broadcast aperture so
 * every GPC receives the same values.  Like the other generate_* helpers
 * this is a verbatim dump of post-reset hardware state: order and values
 * are significant.  Note 0x418800 and 0x418c6c are deliberately
 * re-written with different values later in nve0_grctx_generate(). */
2394 static void
2395 nve0_graph_generate_gpc(struct nvc0_graph_priv *priv)
2396 {
2397 nv_wr32(priv, 0x418380, 0x16);
2398 nv_wr32(priv, 0x418400, 0x38004e00);
2399 nv_wr32(priv, 0x418404, 0x71e0ffff);
2400 nv_wr32(priv, 0x41840c, 0x1008);
2401 nv_wr32(priv, 0x418410, 0xfff0fff);
2402 nv_wr32(priv, 0x418414, 0x2200fff);
2403 nv_wr32(priv, 0x418450, 0x0);
2404 nv_wr32(priv, 0x418454, 0x0);
2405 nv_wr32(priv, 0x418458, 0x0);
2406 nv_wr32(priv, 0x41845c, 0x0);
2407 nv_wr32(priv, 0x418460, 0x0);
2408 nv_wr32(priv, 0x418464, 0x0);
2409 nv_wr32(priv, 0x418468, 0x1);
2410 nv_wr32(priv, 0x41846c, 0x0);
2411 nv_wr32(priv, 0x418470, 0x0);
2412 nv_wr32(priv, 0x418600, 0x1f);
2413 nv_wr32(priv, 0x418684, 0xf);
2414 nv_wr32(priv, 0x418700, 0x2);
2415 nv_wr32(priv, 0x418704, 0x80);
2416 nv_wr32(priv, 0x418708, 0x0);
2417 nv_wr32(priv, 0x41870c, 0x0);
2418 nv_wr32(priv, 0x418710, 0x0);
2419 nv_wr32(priv, 0x418800, 0x7006860a);
2420 nv_wr32(priv, 0x418808, 0x0);
2421 nv_wr32(priv, 0x41880c, 0x0);
2422 nv_wr32(priv, 0x418810, 0x0);
2423 nv_wr32(priv, 0x418828, 0x44);
2424 nv_wr32(priv, 0x418830, 0x10000001);
2425 nv_wr32(priv, 0x4188d8, 0x8);
2426 nv_wr32(priv, 0x4188e0, 0x1000000);
2427 nv_wr32(priv, 0x4188e8, 0x0);
2428 nv_wr32(priv, 0x4188ec, 0x0);
2429 nv_wr32(priv, 0x4188f0, 0x0);
2430 nv_wr32(priv, 0x4188f4, 0x0);
2431 nv_wr32(priv, 0x4188f8, 0x0);
2432 nv_wr32(priv, 0x4188fc, 0x20100018);
2433 nv_wr32(priv, 0x41891c, 0xff00ff);
2434 nv_wr32(priv, 0x418924, 0x0);
2435 nv_wr32(priv, 0x418928, 0xffff00);
2436 nv_wr32(priv, 0x41892c, 0xff00);
2437 nv_wr32(priv, 0x418a00, 0x0);
2438 nv_wr32(priv, 0x418a04, 0x0);
2439 nv_wr32(priv, 0x418a08, 0x0);
2440 nv_wr32(priv, 0x418a0c, 0x10000);
2441 nv_wr32(priv, 0x418a10, 0x0);
2442 nv_wr32(priv, 0x418a14, 0x0);
2443 nv_wr32(priv, 0x418a18, 0x0);
2444 nv_wr32(priv, 0x418a20, 0x0);
2445 nv_wr32(priv, 0x418a24, 0x0);
2446 nv_wr32(priv, 0x418a28, 0x0);
2447 nv_wr32(priv, 0x418a2c, 0x10000);
2448 nv_wr32(priv, 0x418a30, 0x0);
2449 nv_wr32(priv, 0x418a34, 0x0);
2450 nv_wr32(priv, 0x418a38, 0x0);
2451 nv_wr32(priv, 0x418a40, 0x0);
2452 nv_wr32(priv, 0x418a44, 0x0);
2453 nv_wr32(priv, 0x418a48, 0x0);
2454 nv_wr32(priv, 0x418a4c, 0x10000);
2455 nv_wr32(priv, 0x418a50, 0x0);
2456 nv_wr32(priv, 0x418a54, 0x0);
2457 nv_wr32(priv, 0x418a58, 0x0);
2458 nv_wr32(priv, 0x418a60, 0x0);
2459 nv_wr32(priv, 0x418a64, 0x0);
2460 nv_wr32(priv, 0x418a68, 0x0);
2461 nv_wr32(priv, 0x418a6c, 0x10000);
2462 nv_wr32(priv, 0x418a70, 0x0);
2463 nv_wr32(priv, 0x418a74, 0x0);
2464 nv_wr32(priv, 0x418a78, 0x0);
2465 nv_wr32(priv, 0x418a80, 0x0);
2466 nv_wr32(priv, 0x418a84, 0x0);
2467 nv_wr32(priv, 0x418a88, 0x0);
2468 nv_wr32(priv, 0x418a8c, 0x10000);
2469 nv_wr32(priv, 0x418a90, 0x0);
2470 nv_wr32(priv, 0x418a94, 0x0);
2471 nv_wr32(priv, 0x418a98, 0x0);
2472 nv_wr32(priv, 0x418aa0, 0x0);
2473 nv_wr32(priv, 0x418aa4, 0x0);
2474 nv_wr32(priv, 0x418aa8, 0x0);
2475 nv_wr32(priv, 0x418aac, 0x10000);
2476 nv_wr32(priv, 0x418ab0, 0x0);
2477 nv_wr32(priv, 0x418ab4, 0x0);
2478 nv_wr32(priv, 0x418ab8, 0x0);
2479 nv_wr32(priv, 0x418ac0, 0x0);
2480 nv_wr32(priv, 0x418ac4, 0x0);
2481 nv_wr32(priv, 0x418ac8, 0x0);
2482 nv_wr32(priv, 0x418acc, 0x10000);
2483 nv_wr32(priv, 0x418ad0, 0x0);
2484 nv_wr32(priv, 0x418ad4, 0x0);
2485 nv_wr32(priv, 0x418ad8, 0x0);
2486 nv_wr32(priv, 0x418ae0, 0x0);
2487 nv_wr32(priv, 0x418ae4, 0x0);
2488 nv_wr32(priv, 0x418ae8, 0x0);
2489 nv_wr32(priv, 0x418aec, 0x10000);
2490 nv_wr32(priv, 0x418af0, 0x0);
2491 nv_wr32(priv, 0x418af4, 0x0);
2492 nv_wr32(priv, 0x418af8, 0x0);
2493 nv_wr32(priv, 0x418b00, 0x6);
2494 nv_wr32(priv, 0x418b08, 0xa418820);
2495 nv_wr32(priv, 0x418b0c, 0x62080e6);
2496 nv_wr32(priv, 0x418b10, 0x20398a4);
2497 nv_wr32(priv, 0x418b14, 0xe629062);
2498 nv_wr32(priv, 0x418b18, 0xa418820);
2499 nv_wr32(priv, 0x418b1c, 0xe6);
2500 nv_wr32(priv, 0x418bb8, 0x103);
2501 nv_wr32(priv, 0x418c08, 0x1);
2502 nv_wr32(priv, 0x418c10, 0x0);
2503 nv_wr32(priv, 0x418c14, 0x0);
2504 nv_wr32(priv, 0x418c18, 0x0);
2505 nv_wr32(priv, 0x418c1c, 0x0);
2506 nv_wr32(priv, 0x418c20, 0x0);
2507 nv_wr32(priv, 0x418c24, 0x0);
2508 nv_wr32(priv, 0x418c28, 0x0);
2509 nv_wr32(priv, 0x418c2c, 0x0);
2510 nv_wr32(priv, 0x418c40, 0xffffffff);
2511 nv_wr32(priv, 0x418c6c, 0x1);
2512 nv_wr32(priv, 0x418c80, 0x20200004);
2513 nv_wr32(priv, 0x418c8c, 0x1);
2514 nv_wr32(priv, 0x419000, 0x780);
2515 nv_wr32(priv, 0x419004, 0x0);
2516 nv_wr32(priv, 0x419008, 0x0);
2517 nv_wr32(priv, 0x419014, 0x4);
2518}
2519
/* Golden-context defaults for the TPC broadcast register space
 * (0x4198xx..0x419fxx range).  Written through the broadcast aperture so
 * every TPC receives the same values.  Verbatim post-reset hardware dump;
 * order and values are significant.  Note 0x419848 is later re-written
 * via the mmio_list relocations and 0x419f78/0x419c00 via direct writes
 * in nve0_grctx_generate(). */
2520 static void
2521 nve0_graph_generate_tpc(struct nvc0_graph_priv *priv)
2522 {
2523 nv_wr32(priv, 0x419848, 0x0);
2524 nv_wr32(priv, 0x419864, 0x129);
2525 nv_wr32(priv, 0x419888, 0x0);
2526 nv_wr32(priv, 0x419a00, 0xf0);
2527 nv_wr32(priv, 0x419a04, 0x1);
2528 nv_wr32(priv, 0x419a08, 0x21);
2529 nv_wr32(priv, 0x419a0c, 0x20000);
2530 nv_wr32(priv, 0x419a10, 0x0);
2531 nv_wr32(priv, 0x419a14, 0x200);
2532 nv_wr32(priv, 0x419a1c, 0xc000);
2533 nv_wr32(priv, 0x419a20, 0x800);
2534 nv_wr32(priv, 0x419a30, 0x1);
2535 nv_wr32(priv, 0x419ac4, 0x37f440);
2536 nv_wr32(priv, 0x419c00, 0xa);
2537 nv_wr32(priv, 0x419c04, 0x80000006);
2538 nv_wr32(priv, 0x419c08, 0x2);
2539 nv_wr32(priv, 0x419c20, 0x0);
2540 nv_wr32(priv, 0x419c24, 0x84210);
2541 nv_wr32(priv, 0x419c28, 0x3efbefbe);
2542 nv_wr32(priv, 0x419ce8, 0x0);
2543 nv_wr32(priv, 0x419cf4, 0x3203);
2544 nv_wr32(priv, 0x419e04, 0x0);
2545 nv_wr32(priv, 0x419e08, 0x0);
2546 nv_wr32(priv, 0x419e0c, 0x0);
2547 nv_wr32(priv, 0x419e10, 0x402);
2548 nv_wr32(priv, 0x419e44, 0x13eff2);
2549 nv_wr32(priv, 0x419e48, 0x0);
2550 nv_wr32(priv, 0x419e4c, 0x7f);
2551 nv_wr32(priv, 0x419e50, 0x0);
2552 nv_wr32(priv, 0x419e54, 0x0);
2553 nv_wr32(priv, 0x419e58, 0x0);
2554 nv_wr32(priv, 0x419e5c, 0x0);
2555 nv_wr32(priv, 0x419e60, 0x0);
2556 nv_wr32(priv, 0x419e64, 0x0);
2557 nv_wr32(priv, 0x419e68, 0x0);
2558 nv_wr32(priv, 0x419e6c, 0x0);
2559 nv_wr32(priv, 0x419e70, 0x0);
2560 nv_wr32(priv, 0x419e74, 0x0);
2561 nv_wr32(priv, 0x419e78, 0x0);
2562 nv_wr32(priv, 0x419e7c, 0x0);
2563 nv_wr32(priv, 0x419e80, 0x0);
2564 nv_wr32(priv, 0x419e84, 0x0);
2565 nv_wr32(priv, 0x419e88, 0x0);
2566 nv_wr32(priv, 0x419e8c, 0x0);
2567 nv_wr32(priv, 0x419e90, 0x0);
2568 nv_wr32(priv, 0x419e94, 0x0);
2569 nv_wr32(priv, 0x419e98, 0x0);
2570 nv_wr32(priv, 0x419eac, 0x1fcf);
2571 nv_wr32(priv, 0x419eb0, 0xd3f);
2572 nv_wr32(priv, 0x419ec8, 0x1304f);
2573 nv_wr32(priv, 0x419f30, 0x0);
2574 nv_wr32(priv, 0x419f34, 0x0);
2575 nv_wr32(priv, 0x419f38, 0x0);
2576 nv_wr32(priv, 0x419f3c, 0x0);
2577 nv_wr32(priv, 0x419f40, 0x0);
2578 nv_wr32(priv, 0x419f44, 0x0);
2579 nv_wr32(priv, 0x419f48, 0x0);
2580 nv_wr32(priv, 0x419f4c, 0x0);
2581 nv_wr32(priv, 0x419f58, 0x0);
2582 nv_wr32(priv, 0x419f78, 0xb);
2583}
2584
/* Golden-context defaults for the 0x41bexx/0x41bfxx register range
 * (per-TPC-related unit, exact purpose not reverse-engineered).  The
 * 0x41bf00..0x41bf14 values and 0x41bfd0/0x41bfe4 are placeholders that
 * nve0_grctx_generate() later overwrites with computed TPC-distribution
 * data.  Verbatim hardware dump; do not reorder. */
2585 static void
2586 nve0_graph_generate_tpcunk(struct nvc0_graph_priv *priv)
2587 {
2588 nv_wr32(priv, 0x41be24, 0x6);
2589 nv_wr32(priv, 0x41bec0, 0x12180000);
2590 nv_wr32(priv, 0x41bec4, 0x37f7f);
2591 nv_wr32(priv, 0x41bee4, 0x6480430);
2592 nv_wr32(priv, 0x41bf00, 0xa418820);
2593 nv_wr32(priv, 0x41bf04, 0x62080e6);
2594 nv_wr32(priv, 0x41bf08, 0x20398a4);
2595 nv_wr32(priv, 0x41bf0c, 0xe629062);
2596 nv_wr32(priv, 0x41bf10, 0xa418820);
2597 nv_wr32(priv, 0x41bf14, 0xe6);
2598 nv_wr32(priv, 0x41bfd0, 0x900103);
2599 nv_wr32(priv, 0x41bfe0, 0x400001);
2600 nv_wr32(priv, 0x41bfe4, 0x0);
2601}
2602
/* nve0_grctx_generate() - build the "golden" graph context image for
 * NVE0 (Kepler) PGRAPH.
 *
 * Initialises a context image via nvc0_grctx_init(), programs the
 * post-reset register state (the generate_* dump helpers above), records
 * the buffers/relocations the context-switch firmware needs (mmio_data /
 * mmio_list), distributes TPC ids across GPCs, and finally replays the
 * method-state dumps before handing the image back via nvc0_grctx_fini().
 *
 * Returns 0 on success or a negative error code from init/fini.
 * The write ordering throughout mirrors a trace of the binary driver --
 * do not reorder statements.
 */
2603 int
2604 nve0_grctx_generate(struct nvc0_graph_priv *priv)
2605 {
2606 struct nvc0_grctx info;
2607 int ret, i, gpc, tpc, id;
2608 u32 data[6] = {}, data2[2] = {}, tmp;
2609 u32 tpc_set = 0, tpc_mask = 0;
2610 u32 magic[GPC_MAX][2], offset;
2611 u8 tpcnr[GPC_MAX], a, b;
2612 u8 shift, ntpcv;
2613
2614 ret = nvc0_grctx_init(priv, &info);
2615 if (ret)
2616 return ret;
2617
/* Disable (via the 0x260 mask) while the golden state is programmed;
 * re-enabled at the bottom of the function. */
2618 nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
2619 nv_wr32(priv, 0x400204, 0x00000000);
2620 nv_wr32(priv, 0x400208, 0x00000000);
2621
/* Replay the traced post-reset register state for each PGRAPH unit. */
2622 nve0_graph_generate_unk40xx(priv);
2623 nve0_graph_generate_unk44xx(priv);
2624 nve0_graph_generate_unk46xx(priv);
2625 nve0_graph_generate_unk47xx(priv);
2626 nve0_graph_generate_unk58xx(priv);
2627 nve0_graph_generate_unk60xx(priv);
2628 nve0_graph_generate_unk64xx(priv);
2629 nve0_graph_generate_unk70xx(priv);
2630 nve0_graph_generate_unk78xx(priv);
2631 nve0_graph_generate_unk80xx(priv);
2632 nve0_graph_generate_unk88xx(priv);
2633 nve0_graph_generate_gpc(priv);
2634 nve0_graph_generate_tpc(priv);
2635 nve0_graph_generate_tpcunk(priv);
2636
2637 nv_wr32(priv, 0x404154, 0x0);
2638
/* Describe the context buffers and the register->buffer-address
 * relocations the ctxsw firmware will apply on every channel load. */
2639 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
2640 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
2641 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
2642 mmio_list(0x40800c, 0x00000000, 8, 1);
2643 mmio_list(0x408010, 0x80000000, 0, 0);
2644 mmio_list(0x419004, 0x00000000, 8, 1);
2645 mmio_list(0x419008, 0x00000000, 0, 0);
2646 mmio_list(0x4064cc, 0x80000000, 0, 0);
2647 mmio_list(0x408004, 0x00000000, 8, 0);
2648 mmio_list(0x408008, 0x80000030, 0, 0);
2649 mmio_list(0x418808, 0x00000000, 8, 0);
2650 mmio_list(0x41880c, 0x80000030, 0, 0);
2651 mmio_list(0x4064c8, 0x01800600, 0, 0);
2652 mmio_list(0x418810, 0x80000000, 12, 2);
2653 mmio_list(0x419848, 0x10000000, 12, 2);
2654 mmio_list(0x405830, 0x02180648, 0, 0);
2655 mmio_list(0x4064c4, 0x0192ffff, 0, 0);
/* Per-GPC "magic" values scale with that GPC's TPC count; the constants
 * (0x218/0x648/0x324) match the 0x405830 value programmed above. */
2656 for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
2657 u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
2658 u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
2659 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
2660 magic[gpc][1] = 0x00000000 | (magic1 << 16);
2661 offset += 0x0324 * priv->tpc_nr[gpc];
2662 }
/* NOTE: offset intentionally keeps accumulating from the loop above. */
2663 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
2664 mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
2665 mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
2666 offset += 0x07ff * priv->tpc_nr[gpc];
2667 }
2668 mmio_list(0x17e91c, 0x06060609, 0, 0);
2669 mmio_list(0x17e920, 0x00090a05, 0, 0);
2670
2671 nv_wr32(priv, 0x418c6c, 0x1);
2672 nv_wr32(priv, 0x41980c, 0x10);
2673 nv_wr32(priv, 0x41be08, 0x4);
2674 nv_wr32(priv, 0x4064c0, 0x801a00f0);
2675 nv_wr32(priv, 0x405800, 0xf8000bf);
2676 nv_wr32(priv, 0x419c00, 0xa);
2677
/* Assign a globally-unique id to each present TPC, iterating TPC-major
 * so ids are spread evenly across GPCs; also store each GPC's TPC count. */
2678 for (tpc = 0, id = 0; tpc < 4; tpc++) {
2679 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
2680 if (tpc < priv->tpc_nr[gpc]) {
2681 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id);
2682 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id);
2683 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
2684 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++);
2685 }
2686
2687 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
2688 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
2689 }
2690 }
2691
/* Pack per-GPC TPC counts into one word, 4 bits per GPC. */
2692 tmp = 0;
2693 for (i = 0; i < priv->gpc_nr; i++)
2694 tmp |= priv->tpc_nr[i] << (i * 4);
2695 nv_wr32(priv, 0x406028, tmp);
2696 nv_wr32(priv, 0x405870, tmp);
2697
2698 nv_wr32(priv, 0x40602c, 0x0);
2699 nv_wr32(priv, 0x405874, 0x0);
2700 nv_wr32(priv, 0x406030, 0x0);
2701 nv_wr32(priv, 0x405878, 0x0);
2702 nv_wr32(priv, 0x406034, 0x0);
2703 nv_wr32(priv, 0x40587c, 0x0);
2704
2705 /* calculate first set of magics */
2706 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2707
/* Round-robin a GPC number for every TPC, 5 bits per entry, 6 entries
 * per data[] word; unused slots are filled with 7 below. */
2708 gpc = -1;
2709 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2710 do {
2711 gpc = (gpc + 1) % priv->gpc_nr;
2712 } while (!tpcnr[gpc]);
2713 tpcnr[gpc]--;
2714
2715 data[tpc / 6] |= gpc << ((tpc % 6) * 5);
2716 }
2717
2718 for (; tpc < 32; tpc++)
2719 data[tpc / 6] |= 7 << ((tpc % 6) * 5);
2720
2721 /* and the second... */
/* Normalise tpc_total so bit 4 is set, remembering the shift; data2[0]
 * packs ntpcv, shift, a modulus sample, tpc_total and magic_not_rop_nr,
 * data2[1] packs (1 << (i+5)) % ntpcv for i = 1..6, 5 bits each. */
2722 shift = 0;
2723 ntpcv = priv->tpc_total;
2724 while (!(ntpcv & (1 << 4))) {
2725 ntpcv <<= 1;
2726 shift++;
2727 }
2728
2729 data2[0] = ntpcv << 16;
2730 data2[0] |= shift << 21;
2731 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
2732 data2[0] |= priv->tpc_total << 8;
2733 data2[0] |= priv->magic_not_rop_nr;
2734 for (i = 1; i < 7; i++)
2735 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
2736
2737 /* and write it all the various parts of PGRAPH */
2738 nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
2739 for (i = 0; i < 6; i++)
2740 nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
2741
2742 nv_wr32(priv, 0x41bfd0, data2[0]);
2743 nv_wr32(priv, 0x41bfe4, data2[1]);
2744 for (i = 0; i < 6; i++)
2745 nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
2746
2747 nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
2748 for (i = 0; i < 6; i++)
2749 nv_wr32(priv, 0x40780c + (i * 4), data[i]);
2750
2751
/* Build a 32-entry table mapping screen-tile index -> active TPC set:
 * tpc_mask has one bit per present TPC (8 bits per GPC); each step that
 * advances 'a' adds the next round-robin TPC to tpc_set. */
2752 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2753 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
2754 tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
2755
2756 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
2757 a = (i * (priv->tpc_total - 1)) / 32;
2758 if (a != b) {
2759 b = a;
2760 do {
2761 gpc = (gpc + 1) % priv->gpc_nr;
2762 } while (!tpcnr[gpc]);
2763 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
2764
2765 tpc_set |= 1 << ((gpc * 8) + tpc);
2766 }
2767
2768 nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
2769 nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
2770 }
2771
2772 for (i = 0; i < 8; i++)
2773 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
2774
2775 nv_wr32(priv, 0x405b00, 0x201);
2776 nv_wr32(priv, 0x408850, 0x2);
2777 nv_wr32(priv, 0x408958, 0x2);
2778 nv_wr32(priv, 0x419f78, 0xa);
2779
/* Replay the method-state dumps for the supported object classes. */
2780 nve0_grctx_generate_icmd(priv);
2781 nve0_grctx_generate_a097(priv);
2782 nve0_grctx_generate_902d(priv);
2783
2784 nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
2785 nv_wr32(priv, 0x418800, 0x7026860a); //XXX
2786 nv_wr32(priv, 0x41be10, 0x00bb8bc7); //XXX
2787 return nvc0_grctx_fini(&info);
2788}
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
index 15272be33b66..b86cc60dcd56 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
@@ -24,7 +24,7 @@
24 */ 24 */
25 25
26/* To build: 26/* To build:
27 * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h 27 * m4 gpcnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o gpcnvc0.fuc.h
28 */ 28 */
29 29
30/* TODO 30/* TODO
@@ -33,7 +33,7 @@
33 */ 33 */
34 34
35.section #nvc0_grgpc_data 35.section #nvc0_grgpc_data
36include(`nvc0_graph.fuc') 36include(`nvc0.fuc')
37gpc_id: .b32 0 37gpc_id: .b32 0
38gpc_mmio_list_head: .b32 0 38gpc_mmio_list_head: .b32 0
39gpc_mmio_list_tail: .b32 0 39gpc_mmio_list_tail: .b32 0
@@ -209,11 +209,11 @@ nvd9_tpc_mmio_tail:
209.section #nvc0_grgpc_code 209.section #nvc0_grgpc_code
210bra #init 210bra #init
211define(`include_code') 211define(`include_code')
212include(`nvc0_graph.fuc') 212include(`nvc0.fuc')
213 213
214// reports an exception to the host 214// reports an exception to the host
215// 215//
216// In: $r15 error code (see nvc0_graph.fuc) 216// In: $r15 error code (see nvc0.fuc)
217// 217//
218error: 218error:
219 push $r14 219 push $r14
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
index a988b8ad00ac..96050ddb22ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
@@ -1,11 +1,19 @@
1uint32_t nvc0_grgpc_data[] = { 1uint32_t nvc0_grgpc_data[] = {
2/* 0x0000: gpc_id */
2 0x00000000, 3 0x00000000,
4/* 0x0004: gpc_mmio_list_head */
3 0x00000000, 5 0x00000000,
6/* 0x0008: gpc_mmio_list_tail */
4 0x00000000, 7 0x00000000,
8/* 0x000c: tpc_count */
5 0x00000000, 9 0x00000000,
10/* 0x0010: tpc_mask */
6 0x00000000, 11 0x00000000,
12/* 0x0014: tpc_mmio_list_head */
7 0x00000000, 13 0x00000000,
14/* 0x0018: tpc_mmio_list_tail */
8 0x00000000, 15 0x00000000,
16/* 0x001c: cmd_queue */
9 0x00000000, 17 0x00000000,
10 0x00000000, 18 0x00000000,
11 0x00000000, 19 0x00000000,
@@ -24,6 +32,7 @@ uint32_t nvc0_grgpc_data[] = {
24 0x00000000, 32 0x00000000,
25 0x00000000, 33 0x00000000,
26 0x00000000, 34 0x00000000,
35/* 0x0064: chipsets */
27 0x000000c0, 36 0x000000c0,
28 0x012800c8, 37 0x012800c8,
29 0x01e40194, 38 0x01e40194,
@@ -49,6 +58,7 @@ uint32_t nvc0_grgpc_data[] = {
49 0x0194012c, 58 0x0194012c,
50 0x025401f8, 59 0x025401f8,
51 0x00000000, 60 0x00000000,
61/* 0x00c8: nvc0_gpc_mmio_head */
52 0x00000380, 62 0x00000380,
53 0x14000400, 63 0x14000400,
54 0x20000450, 64 0x20000450,
@@ -73,7 +83,10 @@ uint32_t nvc0_grgpc_data[] = {
73 0x00000c8c, 83 0x00000c8c,
74 0x08001000, 84 0x08001000,
75 0x00001014, 85 0x00001014,
86/* 0x0128: nvc0_gpc_mmio_tail */
76 0x00000c6c, 87 0x00000c6c,
88/* 0x012c: nvc1_gpc_mmio_tail */
89/* 0x012c: nvd9_gpc_mmio_head */
77 0x00000380, 90 0x00000380,
78 0x04000400, 91 0x04000400,
79 0x0800040c, 92 0x0800040c,
@@ -100,6 +113,8 @@ uint32_t nvc0_grgpc_data[] = {
100 0x00000c8c, 113 0x00000c8c,
101 0x08001000, 114 0x08001000,
102 0x00001014, 115 0x00001014,
116/* 0x0194: nvd9_gpc_mmio_tail */
117/* 0x0194: nvc0_tpc_mmio_head */
103 0x00000018, 118 0x00000018,
104 0x0000003c, 119 0x0000003c,
105 0x00000048, 120 0x00000048,
@@ -120,11 +135,16 @@ uint32_t nvc0_grgpc_data[] = {
120 0x4c000644, 135 0x4c000644,
121 0x00000698, 136 0x00000698,
122 0x04000750, 137 0x04000750,
138/* 0x01e4: nvc0_tpc_mmio_tail */
123 0x00000758, 139 0x00000758,
124 0x000002c4, 140 0x000002c4,
125 0x000006e0, 141 0x000006e0,
142/* 0x01f0: nvcf_tpc_mmio_tail */
126 0x000004bc, 143 0x000004bc,
144/* 0x01f4: nvc3_tpc_mmio_tail */
127 0x00000544, 145 0x00000544,
146/* 0x01f8: nvc1_tpc_mmio_tail */
147/* 0x01f8: nvd9_tpc_mmio_head */
128 0x00000018, 148 0x00000018,
129 0x0000003c, 149 0x0000003c,
130 0x00000048, 150 0x00000048,
@@ -152,12 +172,14 @@ uint32_t nvc0_grgpc_data[] = {
152 172
153uint32_t nvc0_grgpc_code[] = { 173uint32_t nvc0_grgpc_code[] = {
154 0x03060ef5, 174 0x03060ef5,
175/* 0x0004: queue_put */
155 0x9800d898, 176 0x9800d898,
156 0x86f001d9, 177 0x86f001d9,
157 0x0489b808, 178 0x0489b808,
158 0xf00c1bf4, 179 0xf00c1bf4,
159 0x21f502f7, 180 0x21f502f7,
160 0x00f802ec, 181 0x00f802ec,
182/* 0x001c: queue_put_next */
161 0xb60798c4, 183 0xb60798c4,
162 0x8dbb0384, 184 0x8dbb0384,
163 0x0880b600, 185 0x0880b600,
@@ -165,6 +187,7 @@ uint32_t nvc0_grgpc_code[] = {
165 0x90b6018f, 187 0x90b6018f,
166 0x0f94f001, 188 0x0f94f001,
167 0xf801d980, 189 0xf801d980,
190/* 0x0039: queue_get */
168 0x0131f400, 191 0x0131f400,
169 0x9800d898, 192 0x9800d898,
170 0x89b801d9, 193 0x89b801d9,
@@ -176,37 +199,46 @@ uint32_t nvc0_grgpc_code[] = {
176 0x80b6019f, 199 0x80b6019f,
177 0x0f84f001, 200 0x0f84f001,
178 0xf400d880, 201 0xf400d880,
202/* 0x0066: queue_get_done */
179 0x00f80132, 203 0x00f80132,
204/* 0x0068: nv_rd32 */
180 0x0728b7f1, 205 0x0728b7f1,
181 0xb906b4b6, 206 0xb906b4b6,
182 0xc9f002ec, 207 0xc9f002ec,
183 0x00bcd01f, 208 0x00bcd01f,
209/* 0x0078: nv_rd32_wait */
184 0xc800bccf, 210 0xc800bccf,
185 0x1bf41fcc, 211 0x1bf41fcc,
186 0x06a7f0fa, 212 0x06a7f0fa,
187 0x010321f5, 213 0x010321f5,
188 0xf840bfcf, 214 0xf840bfcf,
215/* 0x008d: nv_wr32 */
189 0x28b7f100, 216 0x28b7f100,
190 0x06b4b607, 217 0x06b4b607,
191 0xb980bfd0, 218 0xb980bfd0,
192 0xc9f002ec, 219 0xc9f002ec,
193 0x1ec9f01f, 220 0x1ec9f01f,
221/* 0x00a3: nv_wr32_wait */
194 0xcf00bcd0, 222 0xcf00bcd0,
195 0xccc800bc, 223 0xccc800bc,
196 0xfa1bf41f, 224 0xfa1bf41f,
225/* 0x00ae: watchdog_reset */
197 0x87f100f8, 226 0x87f100f8,
198 0x84b60430, 227 0x84b60430,
199 0x1ff9f006, 228 0x1ff9f006,
200 0xf8008fd0, 229 0xf8008fd0,
230/* 0x00bd: watchdog_clear */
201 0x3087f100, 231 0x3087f100,
202 0x0684b604, 232 0x0684b604,
203 0xf80080d0, 233 0xf80080d0,
234/* 0x00c9: wait_donez */
204 0x3c87f100, 235 0x3c87f100,
205 0x0684b608, 236 0x0684b608,
206 0x99f094bd, 237 0x99f094bd,
207 0x0089d000, 238 0x0089d000,
208 0x081887f1, 239 0x081887f1,
209 0xd00684b6, 240 0xd00684b6,
241/* 0x00e2: wait_done_wait_donez */
210 0x87f1008a, 242 0x87f1008a,
211 0x84b60400, 243 0x84b60400,
212 0x0088cf06, 244 0x0088cf06,
@@ -215,6 +247,7 @@ uint32_t nvc0_grgpc_code[] = {
215 0x84b6085c, 247 0x84b6085c,
216 0xf094bd06, 248 0xf094bd06,
217 0x89d00099, 249 0x89d00099,
250/* 0x0103: wait_doneo */
218 0xf100f800, 251 0xf100f800,
219 0xb6083c87, 252 0xb6083c87,
220 0x94bd0684, 253 0x94bd0684,
@@ -222,6 +255,7 @@ uint32_t nvc0_grgpc_code[] = {
222 0x87f10089, 255 0x87f10089,
223 0x84b60818, 256 0x84b60818,
224 0x008ad006, 257 0x008ad006,
258/* 0x011c: wait_done_wait_doneo */
225 0x040087f1, 259 0x040087f1,
226 0xcf0684b6, 260 0xcf0684b6,
227 0x8aff0088, 261 0x8aff0088,
@@ -230,6 +264,8 @@ uint32_t nvc0_grgpc_code[] = {
230 0xbd0684b6, 264 0xbd0684b6,
231 0x0099f094, 265 0x0099f094,
232 0xf80089d0, 266 0xf80089d0,
267/* 0x013d: mmctx_size */
268/* 0x013f: nv_mmctx_size_loop */
233 0x9894bd00, 269 0x9894bd00,
234 0x85b600e8, 270 0x85b600e8,
235 0x0180b61a, 271 0x0180b61a,
@@ -238,6 +274,7 @@ uint32_t nvc0_grgpc_code[] = {
238 0x04efb804, 274 0x04efb804,
239 0xb9eb1bf4, 275 0xb9eb1bf4,
240 0x00f8029f, 276 0x00f8029f,
277/* 0x015c: mmctx_xfer */
241 0x083c87f1, 278 0x083c87f1,
242 0xbd0684b6, 279 0xbd0684b6,
243 0x0199f094, 280 0x0199f094,
@@ -247,9 +284,11 @@ uint32_t nvc0_grgpc_code[] = {
247 0xf405bbfd, 284 0xf405bbfd,
248 0x8bd0090b, 285 0x8bd0090b,
249 0x0099f000, 286 0x0099f000,
287/* 0x0180: mmctx_base_disabled */
250 0xf405eefd, 288 0xf405eefd,
251 0x8ed00c0b, 289 0x8ed00c0b,
252 0xc08fd080, 290 0xc08fd080,
291/* 0x018f: mmctx_multi_disabled */
253 0xb70199f0, 292 0xb70199f0,
254 0xc8010080, 293 0xc8010080,
255 0xb4b600ab, 294 0xb4b600ab,
@@ -257,6 +296,8 @@ uint32_t nvc0_grgpc_code[] = {
257 0xb601aec8, 296 0xb601aec8,
258 0xbefd11e4, 297 0xbefd11e4,
259 0x008bd005, 298 0x008bd005,
299/* 0x01a8: mmctx_exec_loop */
300/* 0x01a8: mmctx_wait_free */
260 0xf0008ecf, 301 0xf0008ecf,
261 0x0bf41fe4, 302 0x0bf41fe4,
262 0x00ce98fa, 303 0x00ce98fa,
@@ -265,34 +306,42 @@ uint32_t nvc0_grgpc_code[] = {
265 0x04cdb804, 306 0x04cdb804,
266 0xc8e81bf4, 307 0xc8e81bf4,
267 0x1bf402ab, 308 0x1bf402ab,
309/* 0x01c9: mmctx_fini_wait */
268 0x008bcf18, 310 0x008bcf18,
269 0xb01fb4f0, 311 0xb01fb4f0,
270 0x1bf410b4, 312 0x1bf410b4,
271 0x02a7f0f7, 313 0x02a7f0f7,
272 0xf4c921f4, 314 0xf4c921f4,
315/* 0x01de: mmctx_stop */
273 0xabc81b0e, 316 0xabc81b0e,
274 0x10b4b600, 317 0x10b4b600,
275 0xf00cb9f0, 318 0xf00cb9f0,
276 0x8bd012b9, 319 0x8bd012b9,
320/* 0x01ed: mmctx_stop_wait */
277 0x008bcf00, 321 0x008bcf00,
278 0xf412bbc8, 322 0xf412bbc8,
323/* 0x01f6: mmctx_done */
279 0x87f1fa1b, 324 0x87f1fa1b,
280 0x84b6085c, 325 0x84b6085c,
281 0xf094bd06, 326 0xf094bd06,
282 0x89d00199, 327 0x89d00199,
328/* 0x0207: strand_wait */
283 0xf900f800, 329 0xf900f800,
284 0x02a7f0a0, 330 0x02a7f0a0,
285 0xfcc921f4, 331 0xfcc921f4,
332/* 0x0213: strand_pre */
286 0xf100f8a0, 333 0xf100f8a0,
287 0xf04afc87, 334 0xf04afc87,
288 0x97f00283, 335 0x97f00283,
289 0x0089d00c, 336 0x0089d00c,
290 0x020721f5, 337 0x020721f5,
338/* 0x0226: strand_post */
291 0x87f100f8, 339 0x87f100f8,
292 0x83f04afc, 340 0x83f04afc,
293 0x0d97f002, 341 0x0d97f002,
294 0xf50089d0, 342 0xf50089d0,
295 0xf8020721, 343 0xf8020721,
344/* 0x0239: strand_set */
296 0xfca7f100, 345 0xfca7f100,
297 0x02a3f04f, 346 0x02a3f04f,
298 0x0500aba2, 347 0x0500aba2,
@@ -303,6 +352,7 @@ uint32_t nvc0_grgpc_code[] = {
303 0xf000aed0, 352 0xf000aed0,
304 0xbcd00ac7, 353 0xbcd00ac7,
305 0x0721f500, 354 0x0721f500,
355/* 0x0263: strand_ctx_init */
306 0xf100f802, 356 0xf100f802,
307 0xb6083c87, 357 0xb6083c87,
308 0x94bd0684, 358 0x94bd0684,
@@ -325,6 +375,7 @@ uint32_t nvc0_grgpc_code[] = {
325 0x0684b608, 375 0x0684b608,
326 0xb70089cf, 376 0xb70089cf,
327 0x95220080, 377 0x95220080,
378/* 0x02ba: ctx_init_strand_loop */
328 0x8ed008fe, 379 0x8ed008fe,
329 0x408ed000, 380 0x408ed000,
330 0xb6808acf, 381 0xb6808acf,
@@ -338,12 +389,14 @@ uint32_t nvc0_grgpc_code[] = {
338 0x94bd0684, 389 0x94bd0684,
339 0xd00399f0, 390 0xd00399f0,
340 0x00f80089, 391 0x00f80089,
392/* 0x02ec: error */
341 0xe7f1e0f9, 393 0xe7f1e0f9,
342 0xe3f09814, 394 0xe3f09814,
343 0x8d21f440, 395 0x8d21f440,
344 0x041ce0b7, 396 0x041ce0b7,
345 0xf401f7f0, 397 0xf401f7f0,
346 0xe0fc8d21, 398 0xe0fc8d21,
399/* 0x0306: init */
347 0x04bd00f8, 400 0x04bd00f8,
348 0xf10004fe, 401 0xf10004fe,
349 0xf0120017, 402 0xf0120017,
@@ -366,11 +419,13 @@ uint32_t nvc0_grgpc_code[] = {
366 0x27f10002, 419 0x27f10002,
367 0x24b60800, 420 0x24b60800,
368 0x0022cf06, 421 0x0022cf06,
422/* 0x035f: init_find_chipset */
369 0xb65817f0, 423 0xb65817f0,
370 0x13980c10, 424 0x13980c10,
371 0x0432b800, 425 0x0432b800,
372 0xb00b0bf4, 426 0xb00b0bf4,
373 0x1bf40034, 427 0x1bf40034,
428/* 0x0373: init_context */
374 0xf100f8f1, 429 0xf100f8f1,
375 0xb6080027, 430 0xb6080027,
376 0x22cf0624, 431 0x22cf0624,
@@ -407,6 +462,7 @@ uint32_t nvc0_grgpc_code[] = {
407 0x0010b740, 462 0x0010b740,
408 0xf024bd08, 463 0xf024bd08,
409 0x12d01f29, 464 0x12d01f29,
465/* 0x0401: main */
410 0x0031f400, 466 0x0031f400,
411 0xf00028f4, 467 0xf00028f4,
412 0x21f41cd7, 468 0x21f41cd7,
@@ -419,9 +475,11 @@ uint32_t nvc0_grgpc_code[] = {
419 0xfe051efd, 475 0xfe051efd,
420 0x21f50018, 476 0x21f50018,
421 0x0ef404c3, 477 0x0ef404c3,
478/* 0x0431: main_not_ctx_xfer */
422 0x10ef94d3, 479 0x10ef94d3,
423 0xf501f5f0, 480 0xf501f5f0,
424 0xf402ec21, 481 0xf402ec21,
482/* 0x043e: ih */
425 0x80f9c60e, 483 0x80f9c60e,
426 0xf90188fe, 484 0xf90188fe,
427 0xf990f980, 485 0xf990f980,
@@ -436,30 +494,36 @@ uint32_t nvc0_grgpc_code[] = {
436 0xb0b70421, 494 0xb0b70421,
437 0xe7f00400, 495 0xe7f00400,
438 0x00bed001, 496 0x00bed001,
497/* 0x0474: ih_no_fifo */
439 0xfc400ad0, 498 0xfc400ad0,
440 0xfce0fcf0, 499 0xfce0fcf0,
441 0xfcb0fcd0, 500 0xfcb0fcd0,
442 0xfc90fca0, 501 0xfc90fca0,
443 0x0088fe80, 502 0x0088fe80,
444 0x32f480fc, 503 0x32f480fc,
504/* 0x048f: hub_barrier_done */
445 0xf001f800, 505 0xf001f800,
446 0x0e9801f7, 506 0x0e9801f7,
447 0x04febb00, 507 0x04febb00,
448 0x9418e7f1, 508 0x9418e7f1,
449 0xf440e3f0, 509 0xf440e3f0,
450 0x00f88d21, 510 0x00f88d21,
511/* 0x04a4: ctx_redswitch */
451 0x0614e7f1, 512 0x0614e7f1,
452 0xf006e4b6, 513 0xf006e4b6,
453 0xefd020f7, 514 0xefd020f7,
454 0x08f7f000, 515 0x08f7f000,
516/* 0x04b4: ctx_redswitch_delay */
455 0xf401f2b6, 517 0xf401f2b6,
456 0xf7f1fd1b, 518 0xf7f1fd1b,
457 0xefd00a20, 519 0xefd00a20,
520/* 0x04c3: ctx_xfer */
458 0xf100f800, 521 0xf100f800,
459 0xb60a0417, 522 0xb60a0417,
460 0x1fd00614, 523 0x1fd00614,
461 0x0711f400, 524 0x0711f400,
462 0x04a421f5, 525 0x04a421f5,
526/* 0x04d4: ctx_xfer_not_load */
463 0x4afc17f1, 527 0x4afc17f1,
464 0xf00213f0, 528 0xf00213f0,
465 0x12d00c27, 529 0x12d00c27,
@@ -489,11 +553,13 @@ uint32_t nvc0_grgpc_code[] = {
489 0x5c21f508, 553 0x5c21f508,
490 0x0721f501, 554 0x0721f501,
491 0x0601f402, 555 0x0601f402,
556/* 0x054b: ctx_xfer_post */
492 0xf11412f4, 557 0xf11412f4,
493 0xf04afc17, 558 0xf04afc17,
494 0x27f00213, 559 0x27f00213,
495 0x0012d00d, 560 0x0012d00d,
496 0x020721f5, 561 0x020721f5,
562/* 0x055c: ctx_xfer_done */
497 0x048f21f5, 563 0x048f21f5,
498 0x000000f8, 564 0x000000f8,
499 0x00000000, 565 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
new file mode 100644
index 000000000000..7b715fda2763
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -0,0 +1,451 @@
1/* fuc microcode for nve0 PGRAPH/GPC
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26/* To build:
27 * m4 nve0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nve0_grgpc.fuc.h
28 */
29
30/* TODO
31 * - bracket certain functions with scratch writes, useful for debugging
32 * - watchdog timer around ctx operations
33 */
34
35.section #nve0_grgpc_data
36include(`nve0.fuc')
37gpc_id: .b32 0
38gpc_mmio_list_head: .b32 0
39gpc_mmio_list_tail: .b32 0
40
41tpc_count: .b32 0
42tpc_mask: .b32 0
43tpc_mmio_list_head: .b32 0
44tpc_mmio_list_tail: .b32 0
45
46cmd_queue: queue_init
47
48// chipset descriptions
49chipsets:
50.b8 0xe4 0 0 0
51.b16 #nve4_gpc_mmio_head
52.b16 #nve4_gpc_mmio_tail
53.b16 #nve4_tpc_mmio_head
54.b16 #nve4_tpc_mmio_tail
55.b8 0xe7 0 0 0
56.b16 #nve4_gpc_mmio_head
57.b16 #nve4_gpc_mmio_tail
58.b16 #nve4_tpc_mmio_head
59.b16 #nve4_tpc_mmio_tail
60.b8 0 0 0 0
61
62// GPC mmio lists
63nve4_gpc_mmio_head:
64mmctx_data(0x000380, 1)
65mmctx_data(0x000400, 2)
66mmctx_data(0x00040c, 3)
67mmctx_data(0x000450, 9)
68mmctx_data(0x000600, 1)
69mmctx_data(0x000684, 1)
70mmctx_data(0x000700, 5)
71mmctx_data(0x000800, 1)
72mmctx_data(0x000808, 3)
73mmctx_data(0x000828, 1)
74mmctx_data(0x000830, 1)
75mmctx_data(0x0008d8, 1)
76mmctx_data(0x0008e0, 1)
77mmctx_data(0x0008e8, 6)
78mmctx_data(0x00091c, 1)
79mmctx_data(0x000924, 3)
80mmctx_data(0x000b00, 1)
81mmctx_data(0x000b08, 6)
82mmctx_data(0x000bb8, 1)
83mmctx_data(0x000c08, 1)
84mmctx_data(0x000c10, 8)
85mmctx_data(0x000c40, 1)
86mmctx_data(0x000c6c, 1)
87mmctx_data(0x000c80, 1)
88mmctx_data(0x000c8c, 1)
89mmctx_data(0x001000, 3)
90mmctx_data(0x001014, 1)
91mmctx_data(0x003024, 1)
92mmctx_data(0x0030c0, 2)
93mmctx_data(0x0030e4, 1)
94mmctx_data(0x003100, 6)
95mmctx_data(0x0031d0, 1)
96mmctx_data(0x0031e0, 2)
97nve4_gpc_mmio_tail:
98
99// TPC mmio lists
100nve4_tpc_mmio_head:
101mmctx_data(0x000048, 1)
102mmctx_data(0x000064, 1)
103mmctx_data(0x000088, 1)
104mmctx_data(0x000200, 6)
105mmctx_data(0x00021c, 2)
106mmctx_data(0x000230, 1)
107mmctx_data(0x0002c4, 1)
108mmctx_data(0x000400, 3)
109mmctx_data(0x000420, 3)
110mmctx_data(0x0004e8, 1)
111mmctx_data(0x0004f4, 1)
112mmctx_data(0x000604, 4)
113mmctx_data(0x000644, 22)
114mmctx_data(0x0006ac, 2)
115mmctx_data(0x0006c8, 1)
116mmctx_data(0x000730, 8)
117mmctx_data(0x000758, 1)
118mmctx_data(0x000778, 1)
119nve4_tpc_mmio_tail:
120
121.section #nve0_grgpc_code
122bra #init
123define(`include_code')
124include(`nve0.fuc')
125
126// reports an exception to the host
127//
128// In: $r15 error code (see nve0.fuc)
129//
130error:
131 push $r14
132 mov $r14 -0x67ec // 0x9814
133 sethi $r14 0x400000
134 call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
135 add b32 $r14 0x41c
136 mov $r15 1
137 call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
138 pop $r14
139 ret
140
141// GPC fuc initialisation, executed by triggering ucode start, will
142// fall through to main loop after completion.
143//
144// Input:
145// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
146// CC_SCRATCH[1]: context base
147//
148// Output:
149// CC_SCRATCH[0]:
150// 31:31: set to signal completion
151// CC_SCRATCH[1]:
152// 31:0: GPC context size
153//
154init:
155 clear b32 $r0
156 mov $sp $r0
157
158 // enable fifo access
159 mov $r1 0x1200
160 mov $r2 2
161 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
162
163 // setup i0 handler, and route all interrupts to it
164 mov $r1 #ih
165 mov $iv0 $r1
166 mov $r1 0x400
167 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
168
169 // enable fifo interrupt
170 mov $r2 4
171 iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
172
173 // enable interrupts
174 bset $flags ie0
175
176 // figure out which GPC we are, and how many TPCs we have
177 mov $r1 0x608
178 shl b32 $r1 6
179 iord $r2 I[$r1 + 0x000] // UNITS
180 mov $r3 1
181 and $r2 0x1f
182 shl b32 $r3 $r2
183 sub b32 $r3 1
184 st b32 D[$r0 + #tpc_count] $r2
185 st b32 D[$r0 + #tpc_mask] $r3
186 add b32 $r1 0x400
187 iord $r2 I[$r1 + 0x000] // MYINDEX
188 st b32 D[$r0 + #gpc_id] $r2
189
190 // find context data for this chipset
191 mov $r2 0x800
192 shl b32 $r2 6
193 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
194 mov $r1 #chipsets - 12
195 init_find_chipset:
196 add b32 $r1 12
197 ld b32 $r3 D[$r1 + 0x00]
198 cmpu b32 $r3 $r2
199 bra e #init_context
200 cmpu b32 $r3 0
201 bra ne #init_find_chipset
202 // unknown chipset
203 ret
204
205 // initialise context base, and size tracking
206 init_context:
207 mov $r2 0x800
208 shl b32 $r2 6
209 iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
210 clear b32 $r3 // track GPC context size here
211
212 // set mmctx base addresses now so we don't have to do it later,
213 // they don't currently ever change
214 mov $r4 0x700
215 shl b32 $r4 6
216 shr b32 $r5 $r2 8
217 iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
218 iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
219
220 // calculate GPC mmio context size, store the chipset-specific
221 // mmio list pointers somewhere we can get at them later without
222 // re-parsing the chipset list
223 clear b32 $r14
224 clear b32 $r15
225 ld b16 $r14 D[$r1 + 4]
226 ld b16 $r15 D[$r1 + 6]
227 st b16 D[$r0 + #gpc_mmio_list_head] $r14
228 st b16 D[$r0 + #gpc_mmio_list_tail] $r15
229 call #mmctx_size
230 add b32 $r2 $r15
231 add b32 $r3 $r15
232
233 // calculate per-TPC mmio context size, store the list pointers
234 ld b16 $r14 D[$r1 + 8]
235 ld b16 $r15 D[$r1 + 10]
236 st b16 D[$r0 + #tpc_mmio_list_head] $r14
237 st b16 D[$r0 + #tpc_mmio_list_tail] $r15
238 call #mmctx_size
239 ld b32 $r14 D[$r0 + #tpc_count]
240 mulu $r14 $r15
241 add b32 $r2 $r14
242 add b32 $r3 $r14
243
244 // round up base/size to 256 byte boundary (for strand SWBASE)
245 add b32 $r4 0x1300
246 shr b32 $r3 2
247 iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
248 shr b32 $r2 8
249 shr b32 $r3 6
250 add b32 $r2 1
251 add b32 $r3 1
252 shl b32 $r2 8
253 shl b32 $r3 8
254
255 // calculate size of strand context data
256 mov b32 $r15 $r2
257 call #strand_ctx_init
258 add b32 $r3 $r15
259
260 // save context size, and tell HUB we're done
261 mov $r1 0x800
262 shl b32 $r1 6
263 iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
264 add b32 $r1 0x800
265 clear b32 $r2
266 bset $r2 31
267 iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
268
269// Main program loop, very simple, sleeps until woken up by the interrupt
270// handler, pulls a command from the queue and executes its handler
271//
272main:
273 bset $flags $p0
274 sleep $p0
275 mov $r13 #cmd_queue
276 call #queue_get
277 bra $p1 #main
278
279 // 0x0000-0x0003 are all context transfers
280 cmpu b32 $r14 0x04
281 bra nc #main_not_ctx_xfer
282 // fetch $flags and mask off $p1/$p2
283 mov $r1 $flags
284 mov $r2 0x0006
285 not b32 $r2
286 and $r1 $r2
287 // set $p1/$p2 according to transfer type
288 shl b32 $r14 1
289 or $r1 $r14
290 mov $flags $r1
291 // transfer context data
292 call #ctx_xfer
293 bra #main
294
295 main_not_ctx_xfer:
296 shl b32 $r15 $r14 16
297 or $r15 E_BAD_COMMAND
298 call #error
299 bra #main
300
301// interrupt handler
302ih:
303 push $r8
304 mov $r8 $flags
305 push $r8
306 push $r9
307 push $r10
308 push $r11
309 push $r13
310 push $r14
311 push $r15
312
313 // incoming fifo command?
314 iord $r10 I[$r0 + 0x200] // INTR
315 and $r11 $r10 0x00000004
316 bra e #ih_no_fifo
317 // queue incoming fifo command for later processing
318 mov $r11 0x1900
319 mov $r13 #cmd_queue
320 iord $r14 I[$r11 + 0x100] // FIFO_CMD
321 iord $r15 I[$r11 + 0x000] // FIFO_DATA
322 call #queue_put
323 add b32 $r11 0x400
324 mov $r14 1
325 iowr I[$r11 + 0x000] $r14 // FIFO_ACK
326
327 // ack, and wake up main()
328 ih_no_fifo:
329 iowr I[$r0 + 0x100] $r10 // INTR_ACK
330
331 pop $r15
332 pop $r14
333 pop $r13
334 pop $r11
335 pop $r10
336 pop $r9
337 pop $r8
338 mov $flags $r8
339 pop $r8
340 bclr $flags $p0
341 iret
342
343// Set this GPC's bit in HUB_BAR, used to signal completion of various
344// activities to the HUB fuc
345//
346hub_barrier_done:
347 mov $r15 1
348 ld b32 $r14 D[$r0 + #gpc_id]
349 shl b32 $r15 $r14
350 mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
351 sethi $r14 0x400000
352 call #nv_wr32
353 ret
354
355// Disables various things, waits a bit, and re-enables them..
356//
357// Not sure how exactly this helps, perhaps "ENABLE" is not such a
358// good description for the bits we turn off? Anyways, without this,
359// funny things happen.
360//
361ctx_redswitch:
362 mov $r14 0x614
363 shl b32 $r14 6
364 mov $r15 0x020
365 iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
366 mov $r15 8
367 ctx_redswitch_delay:
368 sub b32 $r15 1
369 bra ne #ctx_redswitch_delay
370 mov $r15 0xa20
371 iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
372 ret
373
374// Transfer GPC context data between GPU and storage area
375//
376// In: $r15 context base address
377// $p1 clear on save, set on load
378// $p2 set if opposite direction done/will be done, so:
379// on save it means: "a load will follow this save"
380// on load it means: "a save preceeded this load"
381//
382ctx_xfer:
383 // set context base address
384 mov $r1 0xa04
385 shl b32 $r1 6
386 iowr I[$r1 + 0x000] $r15// MEM_BASE
387 bra not $p1 #ctx_xfer_not_load
388 call #ctx_redswitch
389 ctx_xfer_not_load:
390
391 // strands
392 mov $r1 0x4afc
393 sethi $r1 0x20000
394 mov $r2 0xc
395 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
396 call #strand_wait
397 mov $r2 0x47fc
398 sethi $r2 0x20000
399 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
400 xbit $r2 $flags $p1
401 add b32 $r2 3
402 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
403
404 // mmio context
405 xbit $r10 $flags $p1 // direction
406 or $r10 2 // first
407 mov $r11 0x0000
408 sethi $r11 0x500000
409 ld b32 $r12 D[$r0 + #gpc_id]
410 shl b32 $r12 15
411 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
412 ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
413 ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
414 mov $r14 0 // not multi
415 call #mmctx_xfer
416
417 // per-TPC mmio context
418 xbit $r10 $flags $p1 // direction
419 or $r10 4 // last
420 mov $r11 0x4000
421 sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
422 ld b32 $r12 D[$r0 + #gpc_id]
423 shl b32 $r12 15
424 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
425 ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
426 ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
427 ld b32 $r15 D[$r0 + #tpc_mask]
428 mov $r14 0x800 // stride = 0x800
429 call #mmctx_xfer
430
431 // wait for strands to finish
432 call #strand_wait
433
434 // if load, or a save without a load following, do some
435 // unknown stuff that's done after finishing a block of
436 // strand commands
437 bra $p1 #ctx_xfer_post
438 bra not $p2 #ctx_xfer_done
439 ctx_xfer_post:
440 mov $r1 0x4afc
441 sethi $r1 0x20000
442 mov $r2 0xd
443 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
444 call #strand_wait
445
446 // mark completion in HUB's barrier
447 ctx_xfer_done:
448 call #hub_barrier_done
449 ret
450
451.align 256
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
new file mode 100644
index 000000000000..26c2165bad0f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -0,0 +1,530 @@
1uint32_t nve0_grgpc_data[] = {
2/* 0x0000: gpc_id */
3 0x00000000,
4/* 0x0004: gpc_mmio_list_head */
5 0x00000000,
6/* 0x0008: gpc_mmio_list_tail */
7 0x00000000,
8/* 0x000c: tpc_count */
9 0x00000000,
10/* 0x0010: tpc_mask */
11 0x00000000,
12/* 0x0014: tpc_mmio_list_head */
13 0x00000000,
14/* 0x0018: tpc_mmio_list_tail */
15 0x00000000,
16/* 0x001c: cmd_queue */
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x00000000,
27 0x00000000,
28 0x00000000,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35/* 0x0064: chipsets */
36 0x000000e4,
37 0x01040080,
38 0x014c0104,
39 0x000000e7,
40 0x01040080,
41 0x014c0104,
42 0x00000000,
43/* 0x0080: nve4_gpc_mmio_head */
44 0x00000380,
45 0x04000400,
46 0x0800040c,
47 0x20000450,
48 0x00000600,
49 0x00000684,
50 0x10000700,
51 0x00000800,
52 0x08000808,
53 0x00000828,
54 0x00000830,
55 0x000008d8,
56 0x000008e0,
57 0x140008e8,
58 0x0000091c,
59 0x08000924,
60 0x00000b00,
61 0x14000b08,
62 0x00000bb8,
63 0x00000c08,
64 0x1c000c10,
65 0x00000c40,
66 0x00000c6c,
67 0x00000c80,
68 0x00000c8c,
69 0x08001000,
70 0x00001014,
71 0x00003024,
72 0x040030c0,
73 0x000030e4,
74 0x14003100,
75 0x000031d0,
76 0x040031e0,
77/* 0x0104: nve4_gpc_mmio_tail */
78/* 0x0104: nve4_tpc_mmio_head */
79 0x00000048,
80 0x00000064,
81 0x00000088,
82 0x14000200,
83 0x0400021c,
84 0x00000230,
85 0x000002c4,
86 0x08000400,
87 0x08000420,
88 0x000004e8,
89 0x000004f4,
90 0x0c000604,
91 0x54000644,
92 0x040006ac,
93 0x000006c8,
94 0x1c000730,
95 0x00000758,
96 0x00000778,
97};
98
99uint32_t nve0_grgpc_code[] = {
100 0x03060ef5,
101/* 0x0004: queue_put */
102 0x9800d898,
103 0x86f001d9,
104 0x0489b808,
105 0xf00c1bf4,
106 0x21f502f7,
107 0x00f802ec,
108/* 0x001c: queue_put_next */
109 0xb60798c4,
110 0x8dbb0384,
111 0x0880b600,
112 0x80008e80,
113 0x90b6018f,
114 0x0f94f001,
115 0xf801d980,
116/* 0x0039: queue_get */
117 0x0131f400,
118 0x9800d898,
119 0x89b801d9,
120 0x210bf404,
121 0xb60789c4,
122 0x9dbb0394,
123 0x0890b600,
124 0x98009e98,
125 0x80b6019f,
126 0x0f84f001,
127 0xf400d880,
128/* 0x0066: queue_get_done */
129 0x00f80132,
130/* 0x0068: nv_rd32 */
131 0x0728b7f1,
132 0xb906b4b6,
133 0xc9f002ec,
134 0x00bcd01f,
135/* 0x0078: nv_rd32_wait */
136 0xc800bccf,
137 0x1bf41fcc,
138 0x06a7f0fa,
139 0x010321f5,
140 0xf840bfcf,
141/* 0x008d: nv_wr32 */
142 0x28b7f100,
143 0x06b4b607,
144 0xb980bfd0,
145 0xc9f002ec,
146 0x1ec9f01f,
147/* 0x00a3: nv_wr32_wait */
148 0xcf00bcd0,
149 0xccc800bc,
150 0xfa1bf41f,
151/* 0x00ae: watchdog_reset */
152 0x87f100f8,
153 0x84b60430,
154 0x1ff9f006,
155 0xf8008fd0,
156/* 0x00bd: watchdog_clear */
157 0x3087f100,
158 0x0684b604,
159 0xf80080d0,
160/* 0x00c9: wait_donez */
161 0x3c87f100,
162 0x0684b608,
163 0x99f094bd,
164 0x0089d000,
165 0x081887f1,
166 0xd00684b6,
167/* 0x00e2: wait_done_wait_donez */
168 0x87f1008a,
169 0x84b60400,
170 0x0088cf06,
171 0xf4888aff,
172 0x87f1f31b,
173 0x84b6085c,
174 0xf094bd06,
175 0x89d00099,
176/* 0x0103: wait_doneo */
177 0xf100f800,
178 0xb6083c87,
179 0x94bd0684,
180 0xd00099f0,
181 0x87f10089,
182 0x84b60818,
183 0x008ad006,
184/* 0x011c: wait_done_wait_doneo */
185 0x040087f1,
186 0xcf0684b6,
187 0x8aff0088,
188 0xf30bf488,
189 0x085c87f1,
190 0xbd0684b6,
191 0x0099f094,
192 0xf80089d0,
193/* 0x013d: mmctx_size */
194/* 0x013f: nv_mmctx_size_loop */
195 0x9894bd00,
196 0x85b600e8,
197 0x0180b61a,
198 0xbb0284b6,
199 0xe0b60098,
200 0x04efb804,
201 0xb9eb1bf4,
202 0x00f8029f,
203/* 0x015c: mmctx_xfer */
204 0x083c87f1,
205 0xbd0684b6,
206 0x0199f094,
207 0xf10089d0,
208 0xb6071087,
209 0x94bd0684,
210 0xf405bbfd,
211 0x8bd0090b,
212 0x0099f000,
213/* 0x0180: mmctx_base_disabled */
214 0xf405eefd,
215 0x8ed00c0b,
216 0xc08fd080,
217/* 0x018f: mmctx_multi_disabled */
218 0xb70199f0,
219 0xc8010080,
220 0xb4b600ab,
221 0x0cb9f010,
222 0xb601aec8,
223 0xbefd11e4,
224 0x008bd005,
225/* 0x01a8: mmctx_exec_loop */
226/* 0x01a8: mmctx_wait_free */
227 0xf0008ecf,
228 0x0bf41fe4,
229 0x00ce98fa,
230 0xd005e9fd,
231 0xc0b6c08e,
232 0x04cdb804,
233 0xc8e81bf4,
234 0x1bf402ab,
235/* 0x01c9: mmctx_fini_wait */
236 0x008bcf18,
237 0xb01fb4f0,
238 0x1bf410b4,
239 0x02a7f0f7,
240 0xf4c921f4,
241/* 0x01de: mmctx_stop */
242 0xabc81b0e,
243 0x10b4b600,
244 0xf00cb9f0,
245 0x8bd012b9,
246/* 0x01ed: mmctx_stop_wait */
247 0x008bcf00,
248 0xf412bbc8,
249/* 0x01f6: mmctx_done */
250 0x87f1fa1b,
251 0x84b6085c,
252 0xf094bd06,
253 0x89d00199,
254/* 0x0207: strand_wait */
255 0xf900f800,
256 0x02a7f0a0,
257 0xfcc921f4,
258/* 0x0213: strand_pre */
259 0xf100f8a0,
260 0xf04afc87,
261 0x97f00283,
262 0x0089d00c,
263 0x020721f5,
264/* 0x0226: strand_post */
265 0x87f100f8,
266 0x83f04afc,
267 0x0d97f002,
268 0xf50089d0,
269 0xf8020721,
270/* 0x0239: strand_set */
271 0xfca7f100,
272 0x02a3f04f,
273 0x0500aba2,
274 0xd00fc7f0,
275 0xc7f000ac,
276 0x00bcd00b,
277 0x020721f5,
278 0xf000aed0,
279 0xbcd00ac7,
280 0x0721f500,
281/* 0x0263: strand_ctx_init */
282 0xf100f802,
283 0xb6083c87,
284 0x94bd0684,
285 0xd00399f0,
286 0x21f50089,
287 0xe7f00213,
288 0x3921f503,
289 0xfca7f102,
290 0x02a3f046,
291 0x0400aba0,
292 0xf040a0d0,
293 0xbcd001c7,
294 0x0721f500,
295 0x010c9202,
296 0xf000acd0,
297 0xbcd002c7,
298 0x0721f500,
299 0x2621f502,
300 0x8087f102,
301 0x0684b608,
302 0xb70089cf,
303 0x95220080,
304/* 0x02ba: ctx_init_strand_loop */
305 0x8ed008fe,
306 0x408ed000,
307 0xb6808acf,
308 0xa0b606a5,
309 0x00eabb01,
310 0xb60480b6,
311 0x1bf40192,
312 0x08e4b6e8,
313 0xf1f2efbc,
314 0xb6085c87,
315 0x94bd0684,
316 0xd00399f0,
317 0x00f80089,
318/* 0x02ec: error */
319 0xe7f1e0f9,
320 0xe3f09814,
321 0x8d21f440,
322 0x041ce0b7,
323 0xf401f7f0,
324 0xe0fc8d21,
325/* 0x0306: init */
326 0x04bd00f8,
327 0xf10004fe,
328 0xf0120017,
329 0x12d00227,
330 0x3e17f100,
331 0x0010fe04,
332 0x040017f1,
333 0xf0c010d0,
334 0x12d00427,
335 0x1031f400,
336 0x060817f1,
337 0xcf0614b6,
338 0x37f00012,
339 0x1f24f001,
340 0xb60432bb,
341 0x02800132,
342 0x04038003,
343 0x040010b7,
344 0x800012cf,
345 0x27f10002,
346 0x24b60800,
347 0x0022cf06,
348/* 0x035f: init_find_chipset */
349 0xb65817f0,
350 0x13980c10,
351 0x0432b800,
352 0xb00b0bf4,
353 0x1bf40034,
354/* 0x0373: init_context */
355 0xf100f8f1,
356 0xb6080027,
357 0x22cf0624,
358 0xf134bd40,
359 0xb6070047,
360 0x25950644,
361 0x0045d008,
362 0xbd4045d0,
363 0x58f4bde4,
364 0x1f58021e,
365 0x020e4003,
366 0xf5040f40,
367 0xbb013d21,
368 0x3fbb002f,
369 0x041e5800,
370 0x40051f58,
371 0x0f400a0e,
372 0x3d21f50c,
373 0x030e9801,
374 0xbb00effd,
375 0x3ebb002e,
376 0x0040b700,
377 0x0235b613,
378 0xb60043d0,
379 0x35b60825,
380 0x0120b606,
381 0xb60130b6,
382 0x34b60824,
383 0x022fb908,
384 0x026321f5,
385 0xf1003fbb,
386 0xb6080017,
387 0x13d00614,
388 0x0010b740,
389 0xf024bd08,
390 0x12d01f29,
391/* 0x0401: main */
392 0x0031f400,
393 0xf00028f4,
394 0x21f41cd7,
395 0xf401f439,
396 0xf404e4b0,
397 0x81fe1e18,
398 0x0627f001,
399 0x12fd20bd,
400 0x01e4b604,
401 0xfe051efd,
402 0x21f50018,
403 0x0ef404c3,
404/* 0x0431: main_not_ctx_xfer */
405 0x10ef94d3,
406 0xf501f5f0,
407 0xf402ec21,
408/* 0x043e: ih */
409 0x80f9c60e,
410 0xf90188fe,
411 0xf990f980,
412 0xf9b0f9a0,
413 0xf9e0f9d0,
414 0x800acff0,
415 0xf404abc4,
416 0xb7f11d0b,
417 0xd7f01900,
418 0x40becf1c,
419 0xf400bfcf,
420 0xb0b70421,
421 0xe7f00400,
422 0x00bed001,
423/* 0x0474: ih_no_fifo */
424 0xfc400ad0,
425 0xfce0fcf0,
426 0xfcb0fcd0,
427 0xfc90fca0,
428 0x0088fe80,
429 0x32f480fc,
430/* 0x048f: hub_barrier_done */
431 0xf001f800,
432 0x0e9801f7,
433 0x04febb00,
434 0x9418e7f1,
435 0xf440e3f0,
436 0x00f88d21,
437/* 0x04a4: ctx_redswitch */
438 0x0614e7f1,
439 0xf006e4b6,
440 0xefd020f7,
441 0x08f7f000,
442/* 0x04b4: ctx_redswitch_delay */
443 0xf401f2b6,
444 0xf7f1fd1b,
445 0xefd00a20,
446/* 0x04c3: ctx_xfer */
447 0xf100f800,
448 0xb60a0417,
449 0x1fd00614,
450 0x0711f400,
451 0x04a421f5,
452/* 0x04d4: ctx_xfer_not_load */
453 0x4afc17f1,
454 0xf00213f0,
455 0x12d00c27,
456 0x0721f500,
457 0xfc27f102,
458 0x0223f047,
459 0xf00020d0,
460 0x20b6012c,
461 0x0012d003,
462 0xf001acf0,
463 0xb7f002a5,
464 0x50b3f000,
465 0xb6000c98,
466 0xbcbb0fc4,
467 0x010c9800,
468 0xf0020d98,
469 0x21f500e7,
470 0xacf0015c,
471 0x04a5f001,
472 0x4000b7f1,
473 0x9850b3f0,
474 0xc4b6000c,
475 0x00bcbb0f,
476 0x98050c98,
477 0x0f98060d,
478 0x00e7f104,
479 0x5c21f508,
480 0x0721f501,
481 0x0601f402,
482/* 0x054b: ctx_xfer_post */
483 0xf11412f4,
484 0xf04afc17,
485 0x27f00213,
486 0x0012d00d,
487 0x020721f5,
488/* 0x055c: ctx_xfer_done */
489 0x048f21f5,
490 0x000000f8,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530};
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index 98acddb2c5bb..acfc457654bd 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -24,11 +24,11 @@
24 */ 24 */
25 25
26/* To build: 26/* To build:
27 * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h 27 * m4 hubnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o hubnvc0.fuc.h
28 */ 28 */
29 29
30.section #nvc0_grhub_data 30.section #nvc0_grhub_data
31include(`nvc0_graph.fuc') 31include(`nvc0.fuc')
32gpc_count: .b32 0 32gpc_count: .b32 0
33rop_count: .b32 0 33rop_count: .b32 0
34cmd_queue: queue_init 34cmd_queue: queue_init
@@ -161,11 +161,11 @@ xfer_data: .b32 0
161.section #nvc0_grhub_code 161.section #nvc0_grhub_code
162bra #init 162bra #init
163define(`include_code') 163define(`include_code')
164include(`nvc0_graph.fuc') 164include(`nvc0.fuc')
165 165
166// reports an exception to the host 166// reports an exception to the host
167// 167//
168// In: $r15 error code (see nvc0_graph.fuc) 168// In: $r15 error code (see nvc0.fuc)
169// 169//
170error: 170error:
171 push $r14 171 push $r14
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index c5ed307abeb9..85a8d556f484 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -1,6 +1,9 @@
1uint32_t nvc0_grhub_data[] = { 1uint32_t nvc0_grhub_data[] = {
2/* 0x0000: gpc_count */
2 0x00000000, 3 0x00000000,
4/* 0x0004: rop_count */
3 0x00000000, 5 0x00000000,
6/* 0x0008: cmd_queue */
4 0x00000000, 7 0x00000000,
5 0x00000000, 8 0x00000000,
6 0x00000000, 9 0x00000000,
@@ -19,9 +22,13 @@ uint32_t nvc0_grhub_data[] = {
19 0x00000000, 22 0x00000000,
20 0x00000000, 23 0x00000000,
21 0x00000000, 24 0x00000000,
25/* 0x0050: hub_mmio_list_head */
22 0x00000000, 26 0x00000000,
27/* 0x0054: hub_mmio_list_tail */
23 0x00000000, 28 0x00000000,
29/* 0x0058: ctx_current */
24 0x00000000, 30 0x00000000,
31/* 0x005c: chipsets */
25 0x000000c0, 32 0x000000c0,
26 0x013c00a0, 33 0x013c00a0,
27 0x000000c1, 34 0x000000c1,
@@ -39,6 +46,7 @@ uint32_t nvc0_grhub_data[] = {
39 0x000000d9, 46 0x000000d9,
40 0x01dc0140, 47 0x01dc0140,
41 0x00000000, 48 0x00000000,
49/* 0x00a0: nvc0_hub_mmio_head */
42 0x0417e91c, 50 0x0417e91c,
43 0x04400204, 51 0x04400204,
44 0x28404004, 52 0x28404004,
@@ -78,7 +86,10 @@ uint32_t nvc0_grhub_data[] = {
78 0x08408800, 86 0x08408800,
79 0x0c408900, 87 0x0c408900,
80 0x00408980, 88 0x00408980,
89/* 0x013c: nvc0_hub_mmio_tail */
81 0x044064c0, 90 0x044064c0,
91/* 0x0140: nvc1_hub_mmio_tail */
92/* 0x0140: nvd9_hub_mmio_head */
82 0x0417e91c, 93 0x0417e91c,
83 0x04400204, 94 0x04400204,
84 0x24404004, 95 0x24404004,
@@ -118,6 +129,7 @@ uint32_t nvc0_grhub_data[] = {
118 0x08408800, 129 0x08408800,
119 0x0c408900, 130 0x0c408900,
120 0x00408980, 131 0x00408980,
132/* 0x01dc: nvd9_hub_mmio_tail */
121 0x00000000, 133 0x00000000,
122 0x00000000, 134 0x00000000,
123 0x00000000, 135 0x00000000,
@@ -127,7 +139,10 @@ uint32_t nvc0_grhub_data[] = {
127 0x00000000, 139 0x00000000,
128 0x00000000, 140 0x00000000,
129 0x00000000, 141 0x00000000,
142/* 0x0200: chan_data */
143/* 0x0200: chan_mmio_count */
130 0x00000000, 144 0x00000000,
145/* 0x0204: chan_mmio_address */
131 0x00000000, 146 0x00000000,
132 0x00000000, 147 0x00000000,
133 0x00000000, 148 0x00000000,
@@ -191,17 +206,20 @@ uint32_t nvc0_grhub_data[] = {
191 0x00000000, 206 0x00000000,
192 0x00000000, 207 0x00000000,
193 0x00000000, 208 0x00000000,
209/* 0x0300: xfer_data */
194 0x00000000, 210 0x00000000,
195}; 211};
196 212
197uint32_t nvc0_grhub_code[] = { 213uint32_t nvc0_grhub_code[] = {
198 0x03090ef5, 214 0x03090ef5,
215/* 0x0004: queue_put */
199 0x9800d898, 216 0x9800d898,
200 0x86f001d9, 217 0x86f001d9,
201 0x0489b808, 218 0x0489b808,
202 0xf00c1bf4, 219 0xf00c1bf4,
203 0x21f502f7, 220 0x21f502f7,
204 0x00f802ec, 221 0x00f802ec,
222/* 0x001c: queue_put_next */
205 0xb60798c4, 223 0xb60798c4,
206 0x8dbb0384, 224 0x8dbb0384,
207 0x0880b600, 225 0x0880b600,
@@ -209,6 +227,7 @@ uint32_t nvc0_grhub_code[] = {
209 0x90b6018f, 227 0x90b6018f,
210 0x0f94f001, 228 0x0f94f001,
211 0xf801d980, 229 0xf801d980,
230/* 0x0039: queue_get */
212 0x0131f400, 231 0x0131f400,
213 0x9800d898, 232 0x9800d898,
214 0x89b801d9, 233 0x89b801d9,
@@ -220,37 +239,46 @@ uint32_t nvc0_grhub_code[] = {
220 0x80b6019f, 239 0x80b6019f,
221 0x0f84f001, 240 0x0f84f001,
222 0xf400d880, 241 0xf400d880,
242/* 0x0066: queue_get_done */
223 0x00f80132, 243 0x00f80132,
244/* 0x0068: nv_rd32 */
224 0x0728b7f1, 245 0x0728b7f1,
225 0xb906b4b6, 246 0xb906b4b6,
226 0xc9f002ec, 247 0xc9f002ec,
227 0x00bcd01f, 248 0x00bcd01f,
249/* 0x0078: nv_rd32_wait */
228 0xc800bccf, 250 0xc800bccf,
229 0x1bf41fcc, 251 0x1bf41fcc,
230 0x06a7f0fa, 252 0x06a7f0fa,
231 0x010321f5, 253 0x010321f5,
232 0xf840bfcf, 254 0xf840bfcf,
255/* 0x008d: nv_wr32 */
233 0x28b7f100, 256 0x28b7f100,
234 0x06b4b607, 257 0x06b4b607,
235 0xb980bfd0, 258 0xb980bfd0,
236 0xc9f002ec, 259 0xc9f002ec,
237 0x1ec9f01f, 260 0x1ec9f01f,
261/* 0x00a3: nv_wr32_wait */
238 0xcf00bcd0, 262 0xcf00bcd0,
239 0xccc800bc, 263 0xccc800bc,
240 0xfa1bf41f, 264 0xfa1bf41f,
265/* 0x00ae: watchdog_reset */
241 0x87f100f8, 266 0x87f100f8,
242 0x84b60430, 267 0x84b60430,
243 0x1ff9f006, 268 0x1ff9f006,
244 0xf8008fd0, 269 0xf8008fd0,
270/* 0x00bd: watchdog_clear */
245 0x3087f100, 271 0x3087f100,
246 0x0684b604, 272 0x0684b604,
247 0xf80080d0, 273 0xf80080d0,
274/* 0x00c9: wait_donez */
248 0x3c87f100, 275 0x3c87f100,
249 0x0684b608, 276 0x0684b608,
250 0x99f094bd, 277 0x99f094bd,
251 0x0089d000, 278 0x0089d000,
252 0x081887f1, 279 0x081887f1,
253 0xd00684b6, 280 0xd00684b6,
281/* 0x00e2: wait_done_wait_donez */
254 0x87f1008a, 282 0x87f1008a,
255 0x84b60400, 283 0x84b60400,
256 0x0088cf06, 284 0x0088cf06,
@@ -259,6 +287,7 @@ uint32_t nvc0_grhub_code[] = {
259 0x84b6085c, 287 0x84b6085c,
260 0xf094bd06, 288 0xf094bd06,
261 0x89d00099, 289 0x89d00099,
290/* 0x0103: wait_doneo */
262 0xf100f800, 291 0xf100f800,
263 0xb6083c87, 292 0xb6083c87,
264 0x94bd0684, 293 0x94bd0684,
@@ -266,6 +295,7 @@ uint32_t nvc0_grhub_code[] = {
266 0x87f10089, 295 0x87f10089,
267 0x84b60818, 296 0x84b60818,
268 0x008ad006, 297 0x008ad006,
298/* 0x011c: wait_done_wait_doneo */
269 0x040087f1, 299 0x040087f1,
270 0xcf0684b6, 300 0xcf0684b6,
271 0x8aff0088, 301 0x8aff0088,
@@ -274,6 +304,8 @@ uint32_t nvc0_grhub_code[] = {
274 0xbd0684b6, 304 0xbd0684b6,
275 0x0099f094, 305 0x0099f094,
276 0xf80089d0, 306 0xf80089d0,
307/* 0x013d: mmctx_size */
308/* 0x013f: nv_mmctx_size_loop */
277 0x9894bd00, 309 0x9894bd00,
278 0x85b600e8, 310 0x85b600e8,
279 0x0180b61a, 311 0x0180b61a,
@@ -282,6 +314,7 @@ uint32_t nvc0_grhub_code[] = {
282 0x04efb804, 314 0x04efb804,
283 0xb9eb1bf4, 315 0xb9eb1bf4,
284 0x00f8029f, 316 0x00f8029f,
317/* 0x015c: mmctx_xfer */
285 0x083c87f1, 318 0x083c87f1,
286 0xbd0684b6, 319 0xbd0684b6,
287 0x0199f094, 320 0x0199f094,
@@ -291,9 +324,11 @@ uint32_t nvc0_grhub_code[] = {
291 0xf405bbfd, 324 0xf405bbfd,
292 0x8bd0090b, 325 0x8bd0090b,
293 0x0099f000, 326 0x0099f000,
327/* 0x0180: mmctx_base_disabled */
294 0xf405eefd, 328 0xf405eefd,
295 0x8ed00c0b, 329 0x8ed00c0b,
296 0xc08fd080, 330 0xc08fd080,
331/* 0x018f: mmctx_multi_disabled */
297 0xb70199f0, 332 0xb70199f0,
298 0xc8010080, 333 0xc8010080,
299 0xb4b600ab, 334 0xb4b600ab,
@@ -301,6 +336,8 @@ uint32_t nvc0_grhub_code[] = {
301 0xb601aec8, 336 0xb601aec8,
302 0xbefd11e4, 337 0xbefd11e4,
303 0x008bd005, 338 0x008bd005,
339/* 0x01a8: mmctx_exec_loop */
340/* 0x01a8: mmctx_wait_free */
304 0xf0008ecf, 341 0xf0008ecf,
305 0x0bf41fe4, 342 0x0bf41fe4,
306 0x00ce98fa, 343 0x00ce98fa,
@@ -309,34 +346,42 @@ uint32_t nvc0_grhub_code[] = {
309 0x04cdb804, 346 0x04cdb804,
310 0xc8e81bf4, 347 0xc8e81bf4,
311 0x1bf402ab, 348 0x1bf402ab,
349/* 0x01c9: mmctx_fini_wait */
312 0x008bcf18, 350 0x008bcf18,
313 0xb01fb4f0, 351 0xb01fb4f0,
314 0x1bf410b4, 352 0x1bf410b4,
315 0x02a7f0f7, 353 0x02a7f0f7,
316 0xf4c921f4, 354 0xf4c921f4,
355/* 0x01de: mmctx_stop */
317 0xabc81b0e, 356 0xabc81b0e,
318 0x10b4b600, 357 0x10b4b600,
319 0xf00cb9f0, 358 0xf00cb9f0,
320 0x8bd012b9, 359 0x8bd012b9,
360/* 0x01ed: mmctx_stop_wait */
321 0x008bcf00, 361 0x008bcf00,
322 0xf412bbc8, 362 0xf412bbc8,
363/* 0x01f6: mmctx_done */
323 0x87f1fa1b, 364 0x87f1fa1b,
324 0x84b6085c, 365 0x84b6085c,
325 0xf094bd06, 366 0xf094bd06,
326 0x89d00199, 367 0x89d00199,
368/* 0x0207: strand_wait */
327 0xf900f800, 369 0xf900f800,
328 0x02a7f0a0, 370 0x02a7f0a0,
329 0xfcc921f4, 371 0xfcc921f4,
372/* 0x0213: strand_pre */
330 0xf100f8a0, 373 0xf100f8a0,
331 0xf04afc87, 374 0xf04afc87,
332 0x97f00283, 375 0x97f00283,
333 0x0089d00c, 376 0x0089d00c,
334 0x020721f5, 377 0x020721f5,
378/* 0x0226: strand_post */
335 0x87f100f8, 379 0x87f100f8,
336 0x83f04afc, 380 0x83f04afc,
337 0x0d97f002, 381 0x0d97f002,
338 0xf50089d0, 382 0xf50089d0,
339 0xf8020721, 383 0xf8020721,
384/* 0x0239: strand_set */
340 0xfca7f100, 385 0xfca7f100,
341 0x02a3f04f, 386 0x02a3f04f,
342 0x0500aba2, 387 0x0500aba2,
@@ -347,6 +392,7 @@ uint32_t nvc0_grhub_code[] = {
347 0xf000aed0, 392 0xf000aed0,
348 0xbcd00ac7, 393 0xbcd00ac7,
349 0x0721f500, 394 0x0721f500,
395/* 0x0263: strand_ctx_init */
350 0xf100f802, 396 0xf100f802,
351 0xb6083c87, 397 0xb6083c87,
352 0x94bd0684, 398 0x94bd0684,
@@ -369,6 +415,7 @@ uint32_t nvc0_grhub_code[] = {
369 0x0684b608, 415 0x0684b608,
370 0xb70089cf, 416 0xb70089cf,
371 0x95220080, 417 0x95220080,
418/* 0x02ba: ctx_init_strand_loop */
372 0x8ed008fe, 419 0x8ed008fe,
373 0x408ed000, 420 0x408ed000,
374 0xb6808acf, 421 0xb6808acf,
@@ -382,6 +429,7 @@ uint32_t nvc0_grhub_code[] = {
382 0x94bd0684, 429 0x94bd0684,
383 0xd00399f0, 430 0xd00399f0,
384 0x00f80089, 431 0x00f80089,
432/* 0x02ec: error */
385 0xe7f1e0f9, 433 0xe7f1e0f9,
386 0xe4b60814, 434 0xe4b60814,
387 0x00efd006, 435 0x00efd006,
@@ -389,6 +437,7 @@ uint32_t nvc0_grhub_code[] = {
389 0xf006e4b6, 437 0xf006e4b6,
390 0xefd001f7, 438 0xefd001f7,
391 0xf8e0fc00, 439 0xf8e0fc00,
440/* 0x0309: init */
392 0xfe04bd00, 441 0xfe04bd00,
393 0x07fe0004, 442 0x07fe0004,
394 0x0017f100, 443 0x0017f100,
@@ -429,11 +478,13 @@ uint32_t nvc0_grhub_code[] = {
429 0x080027f1, 478 0x080027f1,
430 0xcf0624b6, 479 0xcf0624b6,
431 0xf7f00022, 480 0xf7f00022,
481/* 0x03a9: init_find_chipset */
432 0x08f0b654, 482 0x08f0b654,
433 0xb800f398, 483 0xb800f398,
434 0x0bf40432, 484 0x0bf40432,
435 0x0034b00b, 485 0x0034b00b,
436 0xf8f11bf4, 486 0xf8f11bf4,
487/* 0x03bd: init_context */
437 0x0017f100, 488 0x0017f100,
438 0x02fe5801, 489 0x02fe5801,
439 0xf003ff58, 490 0xf003ff58,
@@ -454,6 +505,7 @@ uint32_t nvc0_grhub_code[] = {
454 0x001fbb02, 505 0x001fbb02,
455 0xf1000398, 506 0xf1000398,
456 0xf0200047, 507 0xf0200047,
508/* 0x040e: init_gpc */
457 0x4ea05043, 509 0x4ea05043,
458 0x1fb90804, 510 0x1fb90804,
459 0x8d21f402, 511 0x8d21f402,
@@ -467,6 +519,7 @@ uint32_t nvc0_grhub_code[] = {
467 0xf7f00100, 519 0xf7f00100,
468 0x8d21f402, 520 0x8d21f402,
469 0x08004ea0, 521 0x08004ea0,
522/* 0x0440: init_gpc_wait */
470 0xc86821f4, 523 0xc86821f4,
471 0x0bf41fff, 524 0x0bf41fff,
472 0x044ea0fa, 525 0x044ea0fa,
@@ -479,6 +532,7 @@ uint32_t nvc0_grhub_code[] = {
479 0xb74021d0, 532 0xb74021d0,
480 0xbd080020, 533 0xbd080020,
481 0x1f19f014, 534 0x1f19f014,
535/* 0x0473: main */
482 0xf40021d0, 536 0xf40021d0,
483 0x28f40031, 537 0x28f40031,
484 0x08d7f000, 538 0x08d7f000,
@@ -517,6 +571,7 @@ uint32_t nvc0_grhub_code[] = {
517 0x94bd0684, 571 0x94bd0684,
518 0xd00699f0, 572 0xd00699f0,
519 0x0ef40089, 573 0x0ef40089,
574/* 0x0509: chsw_prev_no_next */
520 0xb920f931, 575 0xb920f931,
521 0x32f40212, 576 0x32f40212,
522 0x0232f401, 577 0x0232f401,
@@ -524,10 +579,12 @@ uint32_t nvc0_grhub_code[] = {
524 0x17f120fc, 579 0x17f120fc,
525 0x14b60b00, 580 0x14b60b00,
526 0x0012d006, 581 0x0012d006,
582/* 0x0527: chsw_no_prev */
527 0xc8130ef4, 583 0xc8130ef4,
528 0x0bf41f23, 584 0x0bf41f23,
529 0x0131f40d, 585 0x0131f40d,
530 0xf50232f4, 586 0xf50232f4,
587/* 0x0537: chsw_done */
531 0xf1082921, 588 0xf1082921,
532 0xb60b0c17, 589 0xb60b0c17,
533 0x27f00614, 590 0x27f00614,
@@ -536,10 +593,12 @@ uint32_t nvc0_grhub_code[] = {
536 0xbd0684b6, 593 0xbd0684b6,
537 0x0499f094, 594 0x0499f094,
538 0xf50089d0, 595 0xf50089d0,
596/* 0x0557: main_not_ctx_switch */
539 0xb0ff200e, 597 0xb0ff200e,
540 0x1bf401e4, 598 0x1bf401e4,
541 0x02f2b90d, 599 0x02f2b90d,
542 0x07b521f5, 600 0x07b521f5,
601/* 0x0567: main_not_ctx_chan */
543 0xb0420ef4, 602 0xb0420ef4,
544 0x1bf402e4, 603 0x1bf402e4,
545 0x3c87f12e, 604 0x3c87f12e,
@@ -553,14 +612,17 @@ uint32_t nvc0_grhub_code[] = {
553 0xf094bd06, 612 0xf094bd06,
554 0x89d00799, 613 0x89d00799,
555 0x110ef400, 614 0x110ef400,
615/* 0x0598: main_not_ctx_save */
556 0xf010ef94, 616 0xf010ef94,
557 0x21f501f5, 617 0x21f501f5,
558 0x0ef502ec, 618 0x0ef502ec,
619/* 0x05a6: main_done */
559 0x17f1fed1, 620 0x17f1fed1,
560 0x14b60820, 621 0x14b60820,
561 0xf024bd06, 622 0xf024bd06,
562 0x12d01f29, 623 0x12d01f29,
563 0xbe0ef500, 624 0xbe0ef500,
625/* 0x05b9: ih */
564 0xfe80f9fe, 626 0xfe80f9fe,
565 0x80f90188, 627 0x80f90188,
566 0xa0f990f9, 628 0xa0f990f9,
@@ -574,16 +636,19 @@ uint32_t nvc0_grhub_code[] = {
574 0x21f400bf, 636 0x21f400bf,
575 0x00b0b704, 637 0x00b0b704,
576 0x01e7f004, 638 0x01e7f004,
639/* 0x05ef: ih_no_fifo */
577 0xe400bed0, 640 0xe400bed0,
578 0xf40100ab, 641 0xf40100ab,
579 0xd7f00d0b, 642 0xd7f00d0b,
580 0x01e7f108, 643 0x01e7f108,
581 0x0421f440, 644 0x0421f440,
645/* 0x0600: ih_no_ctxsw */
582 0x0104b7f1, 646 0x0104b7f1,
583 0xabffb0bd, 647 0xabffb0bd,
584 0x0d0bf4b4, 648 0x0d0bf4b4,
585 0x0c1ca7f1, 649 0x0c1ca7f1,
586 0xd006a4b6, 650 0xd006a4b6,
651/* 0x0616: ih_no_other */
587 0x0ad000ab, 652 0x0ad000ab,
588 0xfcf0fc40, 653 0xfcf0fc40,
589 0xfcd0fce0, 654 0xfcd0fce0,
@@ -591,32 +656,40 @@ uint32_t nvc0_grhub_code[] = {
591 0xfe80fc90, 656 0xfe80fc90,
592 0x80fc0088, 657 0x80fc0088,
593 0xf80032f4, 658 0xf80032f4,
659/* 0x0631: ctx_4160s */
594 0x60e7f101, 660 0x60e7f101,
595 0x40e3f041, 661 0x40e3f041,
596 0xf401f7f0, 662 0xf401f7f0,
663/* 0x063e: ctx_4160s_wait */
597 0x21f48d21, 664 0x21f48d21,
598 0x04ffc868, 665 0x04ffc868,
599 0xf8fa0bf4, 666 0xf8fa0bf4,
667/* 0x0649: ctx_4160c */
600 0x60e7f100, 668 0x60e7f100,
601 0x40e3f041, 669 0x40e3f041,
602 0x21f4f4bd, 670 0x21f4f4bd,
671/* 0x0657: ctx_4170s */
603 0xf100f88d, 672 0xf100f88d,
604 0xf04170e7, 673 0xf04170e7,
605 0xf5f040e3, 674 0xf5f040e3,
606 0x8d21f410, 675 0x8d21f410,
676/* 0x0666: ctx_4170w */
607 0xe7f100f8, 677 0xe7f100f8,
608 0xe3f04170, 678 0xe3f04170,
609 0x6821f440, 679 0x6821f440,
610 0xf410f4f0, 680 0xf410f4f0,
611 0x00f8f31b, 681 0x00f8f31b,
682/* 0x0678: ctx_redswitch */
612 0x0614e7f1, 683 0x0614e7f1,
613 0xf106e4b6, 684 0xf106e4b6,
614 0xd00270f7, 685 0xd00270f7,
615 0xf7f000ef, 686 0xf7f000ef,
687/* 0x0689: ctx_redswitch_delay */
616 0x01f2b608, 688 0x01f2b608,
617 0xf1fd1bf4, 689 0xf1fd1bf4,
618 0xd00770f7, 690 0xd00770f7,
619 0x00f800ef, 691 0x00f800ef,
692/* 0x0698: ctx_86c */
620 0x086ce7f1, 693 0x086ce7f1,
621 0xd006e4b6, 694 0xd006e4b6,
622 0xe7f100ef, 695 0xe7f100ef,
@@ -625,6 +698,7 @@ uint32_t nvc0_grhub_code[] = {
625 0xa86ce7f1, 698 0xa86ce7f1,
626 0xf441e3f0, 699 0xf441e3f0,
627 0x00f88d21, 700 0x00f88d21,
701/* 0x06b8: ctx_load */
628 0x083c87f1, 702 0x083c87f1,
629 0xbd0684b6, 703 0xbd0684b6,
630 0x0599f094, 704 0x0599f094,
@@ -639,6 +713,7 @@ uint32_t nvc0_grhub_code[] = {
639 0x0614b60a, 713 0x0614b60a,
640 0xd00747f0, 714 0xd00747f0,
641 0x14d00012, 715 0x14d00012,
716/* 0x06f1: ctx_chan_wait_0 */
642 0x4014cf40, 717 0x4014cf40,
643 0xf41f44f0, 718 0xf41f44f0,
644 0x32d0fa1b, 719 0x32d0fa1b,
@@ -688,6 +763,7 @@ uint32_t nvc0_grhub_code[] = {
688 0xbd0684b6, 763 0xbd0684b6,
689 0x0599f094, 764 0x0599f094,
690 0xf80089d0, 765 0xf80089d0,
766/* 0x07b5: ctx_chan */
691 0x3121f500, 767 0x3121f500,
692 0xb821f506, 768 0xb821f506,
693 0x0ca7f006, 769 0x0ca7f006,
@@ -695,39 +771,48 @@ uint32_t nvc0_grhub_code[] = {
695 0xb60a1017, 771 0xb60a1017,
696 0x27f00614, 772 0x27f00614,
697 0x0012d005, 773 0x0012d005,
774/* 0x07d0: ctx_chan_wait */
698 0xfd0012cf, 775 0xfd0012cf,
699 0x1bf40522, 776 0x1bf40522,
700 0x4921f5fa, 777 0x4921f5fa,
778/* 0x07df: ctx_mmio_exec */
701 0x9800f806, 779 0x9800f806,
702 0x27f18103, 780 0x27f18103,
703 0x24b60a04, 781 0x24b60a04,
704 0x0023d006, 782 0x0023d006,
783/* 0x07ee: ctx_mmio_loop */
705 0x34c434bd, 784 0x34c434bd,
706 0x0f1bf4ff, 785 0x0f1bf4ff,
707 0x030057f1, 786 0x030057f1,
708 0xfa0653f0, 787 0xfa0653f0,
709 0x03f80535, 788 0x03f80535,
789/* 0x0800: ctx_mmio_pull */
710 0x98c04e98, 790 0x98c04e98,
711 0x21f4c14f, 791 0x21f4c14f,
712 0x0830b68d, 792 0x0830b68d,
713 0xf40112b6, 793 0xf40112b6,
794/* 0x0812: ctx_mmio_done */
714 0x0398df1b, 795 0x0398df1b,
715 0x0023d016, 796 0x0023d016,
716 0xf1800080, 797 0xf1800080,
717 0xf0020017, 798 0xf0020017,
718 0x01fa0613, 799 0x01fa0613,
719 0xf803f806, 800 0xf803f806,
801/* 0x0829: ctx_xfer */
720 0x0611f400, 802 0x0611f400,
803/* 0x082f: ctx_xfer_pre */
721 0xf01102f4, 804 0xf01102f4,
722 0x21f510f7, 805 0x21f510f7,
723 0x21f50698, 806 0x21f50698,
724 0x11f40631, 807 0x11f40631,
808/* 0x083d: ctx_xfer_pre_load */
725 0x02f7f01c, 809 0x02f7f01c,
726 0x065721f5, 810 0x065721f5,
727 0x066621f5, 811 0x066621f5,
728 0x067821f5, 812 0x067821f5,
729 0x21f5f4bd, 813 0x21f5f4bd,
730 0x21f50657, 814 0x21f50657,
815/* 0x0856: ctx_xfer_exec */
731 0x019806b8, 816 0x019806b8,
732 0x1427f116, 817 0x1427f116,
733 0x0624b604, 818 0x0624b604,
@@ -762,9 +847,11 @@ uint32_t nvc0_grhub_code[] = {
762 0x0a1017f1, 847 0x0a1017f1,
763 0xf00614b6, 848 0xf00614b6,
764 0x12d00527, 849 0x12d00527,
850/* 0x08dd: ctx_xfer_post_save_wait */
765 0x0012cf00, 851 0x0012cf00,
766 0xf40522fd, 852 0xf40522fd,
767 0x02f4fa1b, 853 0x02f4fa1b,
854/* 0x08e9: ctx_xfer_post */
768 0x02f7f032, 855 0x02f7f032,
769 0x065721f5, 856 0x065721f5,
770 0x21f5f4bd, 857 0x21f5f4bd,
@@ -776,7 +863,9 @@ uint32_t nvc0_grhub_code[] = {
776 0x11fd8001, 863 0x11fd8001,
777 0x070bf405, 864 0x070bf405,
778 0x07df21f5, 865 0x07df21f5,
866/* 0x0914: ctx_xfer_no_post_mmio */
779 0x064921f5, 867 0x064921f5,
868/* 0x0918: ctx_xfer_done */
780 0x000000f8, 869 0x000000f8,
781 0x00000000, 870 0x00000000,
782 0x00000000, 871 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
new file mode 100644
index 000000000000..138eeaa28665
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -0,0 +1,780 @@
1/* fuc microcode for nve0 PGRAPH/HUB
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26/* To build:
27 * m4 nve0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nve0_grhub.fuc.h
28 */
29
30.section #nve0_grhub_data
31include(`nve0.fuc')
32gpc_count: .b32 0
33rop_count: .b32 0
34cmd_queue: queue_init
35hub_mmio_list_head: .b32 0
36hub_mmio_list_tail: .b32 0
37
38ctx_current: .b32 0
39
40chipsets:
41.b8 0xe4 0 0 0
42.b16 #nve4_hub_mmio_head
43.b16 #nve4_hub_mmio_tail
44.b8 0xe7 0 0 0
45.b16 #nve4_hub_mmio_head
46.b16 #nve4_hub_mmio_tail
47.b8 0 0 0 0
48
49nve4_hub_mmio_head:
50mmctx_data(0x17e91c, 2)
51mmctx_data(0x400204, 2)
52mmctx_data(0x404010, 7)
53mmctx_data(0x4040a8, 9)
54mmctx_data(0x4040d0, 7)
55mmctx_data(0x4040f8, 1)
56mmctx_data(0x404130, 3)
57mmctx_data(0x404150, 3)
58mmctx_data(0x404164, 1)
59mmctx_data(0x4041a0, 4)
60mmctx_data(0x404200, 4)
61mmctx_data(0x404404, 14)
62mmctx_data(0x404460, 4)
63mmctx_data(0x404480, 1)
64mmctx_data(0x404498, 1)
65mmctx_data(0x404604, 4)
66mmctx_data(0x404618, 4)
67mmctx_data(0x40462c, 2)
68mmctx_data(0x404640, 1)
69mmctx_data(0x404654, 1)
70mmctx_data(0x404660, 1)
71mmctx_data(0x404678, 19)
72mmctx_data(0x4046c8, 3)
73mmctx_data(0x404700, 3)
74mmctx_data(0x404718, 10)
75mmctx_data(0x404744, 2)
76mmctx_data(0x404754, 1)
77mmctx_data(0x405800, 1)
78mmctx_data(0x405830, 3)
79mmctx_data(0x405854, 1)
80mmctx_data(0x405870, 4)
81mmctx_data(0x405a00, 2)
82mmctx_data(0x405a18, 1)
83mmctx_data(0x405b00, 1)
84mmctx_data(0x405b10, 1)
85mmctx_data(0x406020, 1)
86mmctx_data(0x406028, 4)
87mmctx_data(0x4064a8, 2)
88mmctx_data(0x4064b4, 2)
89mmctx_data(0x4064c0, 12)
90mmctx_data(0x4064fc, 1)
91mmctx_data(0x407040, 1)
92mmctx_data(0x407804, 1)
93mmctx_data(0x40780c, 6)
94mmctx_data(0x4078bc, 1)
95mmctx_data(0x408000, 7)
96mmctx_data(0x408064, 1)
97mmctx_data(0x408800, 3)
98mmctx_data(0x408840, 1)
99mmctx_data(0x408900, 3)
100mmctx_data(0x408980, 1)
101nve4_hub_mmio_tail:
102
103.align 256
104chan_data:
105chan_mmio_count: .b32 0
106chan_mmio_address: .b32 0
107
108.align 256
109xfer_data: .b32 0
110
111.section #nve0_grhub_code
112bra #init
113define(`include_code')
114include(`nve0.fuc')
115
116// reports an exception to the host
117//
118// In: $r15 error code (see nve0.fuc)
119//
120error:
121 push $r14
122 mov $r14 0x814
123 shl b32 $r14 6
124 iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
125 mov $r14 0xc1c
126 shl b32 $r14 6
127 mov $r15 1
128 iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
129 pop $r14
130 ret
131
132// HUB fuc initialisation, executed by triggering ucode start, will
133// fall through to main loop after completion.
134//
135// Input:
136// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
137//
138// Output:
139// CC_SCRATCH[0]:
140// 31:31: set to signal completion
141// CC_SCRATCH[1]:
142// 31:0: total PGRAPH context size
143//
144init:
145 clear b32 $r0
146 mov $sp $r0
147 mov $xdbase $r0
148
149 // enable fifo access
150 mov $r1 0x1200
151 mov $r2 2
152 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
153
154 // setup i0 handler, and route all interrupts to it
155 mov $r1 #ih
156 mov $iv0 $r1
157 mov $r1 0x400
158 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
159
160 // route HUB_CHANNEL_SWITCH to fuc interrupt 8
161 mov $r3 0x404
162 shl b32 $r3 6
163 mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
164 iowr I[$r3 + 0x000] $r2
165
166 // not sure what these are, route them because NVIDIA does, and
167 // the IRQ handler will signal the host if we ever get one.. we
168 // may find out if/why we need to handle these if so..
169 //
170 mov $r2 0x2004
171 iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
172 mov $r2 0x200b
173 iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
174 mov $r2 0x200c
175 iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
176
177 // enable all INTR_UP interrupts
178 mov $r2 0xc24
179 shl b32 $r2 6
180 not b32 $r3 $r0
181 iowr I[$r2] $r3
182
183 // enable fifo, ctxsw, 9, 10, 15 interrupts
184 mov $r2 -0x78fc // 0x8704
185 sethi $r2 0
186 iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
187
188 // fifo level triggered, rest edge
189 sub b32 $r1 0x100
190 mov $r2 4
191 iowr I[$r1] $r2
192
193 // enable interrupts
194 bset $flags ie0
195
196 // fetch enabled GPC/ROP counts
197 mov $r14 -0x69fc // 0x409604
198 sethi $r14 0x400000
199 call #nv_rd32
200 extr $r1 $r15 16:20
201 st b32 D[$r0 + #rop_count] $r1
202 and $r15 0x1f
203 st b32 D[$r0 + #gpc_count] $r15
204
205 // set BAR_REQMASK to GPC mask
206 mov $r1 1
207 shl b32 $r1 $r15
208 sub b32 $r1 1
209 mov $r2 0x40c
210 shl b32 $r2 6
211 iowr I[$r2 + 0x000] $r1
212 iowr I[$r2 + 0x100] $r1
213
214 // find context data for this chipset
215 mov $r2 0x800
216 shl b32 $r2 6
217 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
218 mov $r15 #chipsets - 8
219 init_find_chipset:
220 add b32 $r15 8
221 ld b32 $r3 D[$r15 + 0x00]
222 cmpu b32 $r3 $r2
223 bra e #init_context
224 cmpu b32 $r3 0
225 bra ne #init_find_chipset
226 // unknown chipset
227 ret
228
229 // context size calculation, reserve first 256 bytes for use by fuc
230 init_context:
231 mov $r1 256
232
233 // calculate size of mmio context data
234 ld b16 $r14 D[$r15 + 4]
235 ld b16 $r15 D[$r15 + 6]
236 sethi $r14 0
237 st b32 D[$r0 + #hub_mmio_list_head] $r14
238 st b32 D[$r0 + #hub_mmio_list_tail] $r15
239 call #mmctx_size
240
241 // set mmctx base addresses now so we don't have to do it later,
242 // they don't (currently) ever change
243 mov $r3 0x700
244 shl b32 $r3 6
245 shr b32 $r4 $r1 8
246 iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
247 iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
248 add b32 $r3 0x1300
249 add b32 $r1 $r15
250 shr b32 $r15 2
251 iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
252
253 // strands, base offset needs to be aligned to 256 bytes
254 shr b32 $r1 8
255 add b32 $r1 1
256 shl b32 $r1 8
257 mov b32 $r15 $r1
258 call #strand_ctx_init
259 add b32 $r1 $r15
260
261 // initialise each GPC in sequence by passing in the offset of its
262 // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
263 // has previously been uploaded by the host) running.
264 //
265 // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
266 // when it has completed, and return the size of its context data
267 // in GPCn_CC_SCRATCH[1]
268 //
269 ld b32 $r3 D[$r0 + #gpc_count]
270 mov $r4 0x2000
271 sethi $r4 0x500000
272 init_gpc:
273 // setup, and start GPC ucode running
274 add b32 $r14 $r4 0x804
275 mov b32 $r15 $r1
276 call #nv_wr32 // CC_SCRATCH[1] = ctx offset
277 add b32 $r14 $r4 0x800
278 mov b32 $r15 $r2
279 call #nv_wr32 // CC_SCRATCH[0] = chipset
280 add b32 $r14 $r4 0x10c
281 clear b32 $r15
282 call #nv_wr32
283 add b32 $r14 $r4 0x104
284 call #nv_wr32 // ENTRY
285 add b32 $r14 $r4 0x100
286 mov $r15 2 // CTRL_START_TRIGGER
287 call #nv_wr32 // CTRL
288
289 // wait for it to complete, and adjust context size
290 add b32 $r14 $r4 0x800
291 init_gpc_wait:
292 call #nv_rd32
293 xbit $r15 $r15 31
294 bra e #init_gpc_wait
295 add b32 $r14 $r4 0x804
296 call #nv_rd32
297 add b32 $r1 $r15
298
299 // next!
300 add b32 $r4 0x8000
301 sub b32 $r3 1
302 bra ne #init_gpc
303
304 // save context size, and tell host we're ready
305 mov $r2 0x800
306 shl b32 $r2 6
307 iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
308 add b32 $r2 0x800
309 clear b32 $r1
310 bset $r1 31
311 iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
312
313// Main program loop, very simple, sleeps until woken up by the interrupt
314// handler, pulls a command from the queue and executes its handler
315//
316main:
317 // sleep until we have something to do
318 bset $flags $p0
319 sleep $p0
320 mov $r13 #cmd_queue
321 call #queue_get
322 bra $p1 #main
323
324 // context switch, requested by GPU?
325 cmpu b32 $r14 0x4001
326 bra ne #main_not_ctx_switch
327 trace_set(T_AUTO)
328 mov $r1 0xb00
329 shl b32 $r1 6
330 iord $r2 I[$r1 + 0x100] // CHAN_NEXT
331 iord $r1 I[$r1 + 0x000] // CHAN_CUR
332
333 xbit $r3 $r1 31
334 bra e #chsw_no_prev
335 xbit $r3 $r2 31
336 bra e #chsw_prev_no_next
337 push $r2
338 mov b32 $r2 $r1
339 trace_set(T_SAVE)
340 bclr $flags $p1
341 bset $flags $p2
342 call #ctx_xfer
343 trace_clr(T_SAVE);
344 pop $r2
345 trace_set(T_LOAD);
346 bset $flags $p1
347 call #ctx_xfer
348 trace_clr(T_LOAD);
349 bra #chsw_done
350 chsw_prev_no_next:
351 push $r2
352 mov b32 $r2 $r1
353 bclr $flags $p1
354 bclr $flags $p2
355 call #ctx_xfer
356 pop $r2
357 mov $r1 0xb00
358 shl b32 $r1 6
359 iowr I[$r1] $r2
360 bra #chsw_done
361 chsw_no_prev:
362 xbit $r3 $r2 31
363 bra e #chsw_done
364 bset $flags $p1
365 bclr $flags $p2
366 call #ctx_xfer
367
368 // ack the context switch request
369 chsw_done:
370 mov $r1 0xb0c
371 shl b32 $r1 6
372 mov $r2 1
373 iowr I[$r1 + 0x000] $r2 // 0x409b0c
374 trace_clr(T_AUTO)
375 bra #main
376
377 // request to set current channel? (*not* a context switch)
378 main_not_ctx_switch:
379 cmpu b32 $r14 0x0001
380 bra ne #main_not_ctx_chan
381 mov b32 $r2 $r15
382 call #ctx_chan
383 bra #main_done
384
385 // request to store current channel context?
386 main_not_ctx_chan:
387 cmpu b32 $r14 0x0002
388 bra ne #main_not_ctx_save
389 trace_set(T_SAVE)
390 bclr $flags $p1
391 bclr $flags $p2
392 call #ctx_xfer
393 trace_clr(T_SAVE)
394 bra #main_done
395
396 main_not_ctx_save:
397 shl b32 $r15 $r14 16
398 or $r15 E_BAD_COMMAND
399 call #error
400 bra #main
401
402 main_done:
403 mov $r1 0x820
404 shl b32 $r1 6
405 clear b32 $r2
406 bset $r2 31
407 iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
408 bra #main
409
410// interrupt handler
411ih:
412 push $r8
413 mov $r8 $flags
414 push $r8
415 push $r9
416 push $r10
417 push $r11
418 push $r13
419 push $r14
420 push $r15
421
422 // incoming fifo command?
423 iord $r10 I[$r0 + 0x200] // INTR
424 and $r11 $r10 0x00000004
425 bra e #ih_no_fifo
426 // queue incoming fifo command for later processing
427 mov $r11 0x1900
428 mov $r13 #cmd_queue
429 iord $r14 I[$r11 + 0x100] // FIFO_CMD
430 iord $r15 I[$r11 + 0x000] // FIFO_DATA
431 call #queue_put
432 add b32 $r11 0x400
433 mov $r14 1
434 iowr I[$r11 + 0x000] $r14 // FIFO_ACK
435
436 // context switch request?
437 ih_no_fifo:
438 and $r11 $r10 0x00000100
439 bra e #ih_no_ctxsw
440 // enqueue a context switch for later processing
441 mov $r13 #cmd_queue
442 mov $r14 0x4001
443 call #queue_put
444
445 // anything we didn't handle, bring it to the host's attention
446 ih_no_ctxsw:
447 mov $r11 0x104
448 not b32 $r11
449 and $r11 $r10 $r11
450 bra e #ih_no_other
451 mov $r10 0xc1c
452 shl b32 $r10 6
453 iowr I[$r10] $r11 // INTR_UP_SET
454
455 // ack, and wake up main()
456 ih_no_other:
457 iowr I[$r0 + 0x100] $r10 // INTR_ACK
458
459 pop $r15
460 pop $r14
461 pop $r13
462 pop $r11
463 pop $r10
464 pop $r9
465 pop $r8
466 mov $flags $r8
467 pop $r8
468 bclr $flags $p0
469 iret
470
471// Again, not real sure
472//
473// In: $r15 value to set 0x404170 to
474//
475ctx_4170s:
476 mov $r14 0x4170
477 sethi $r14 0x400000
478 or $r15 0x10
479 call #nv_wr32
480 ret
481
482// Waits for a ctx_4170s() call to complete
483//
484ctx_4170w:
485 mov $r14 0x4170
486 sethi $r14 0x400000
487 call #nv_rd32
488 and $r15 0x10
489 bra ne #ctx_4170w
490 ret
491
492// Disables various things, waits a bit, and re-enables them..
493//
494// Not sure how exactly this helps, perhaps "ENABLE" is not such a
495// good description for the bits we turn off? Anyways, without this,
496// funny things happen.
497//
498ctx_redswitch:
499 mov $r14 0x614
500 shl b32 $r14 6
501 mov $r15 0x270
502 iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
503 mov $r15 8
504 ctx_redswitch_delay:
505 sub b32 $r15 1
506 bra ne #ctx_redswitch_delay
507 mov $r15 0x770
508 iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
509 ret
510
511// Not a clue what this is for, except that unless the value is 0x10, the
512// strand context is saved (and presumably restored) incorrectly..
513//
514// In: $r15 value to set to (0x00/0x10 are used)
515//
516ctx_86c:
517 mov $r14 0x86c
518 shl b32 $r14 6
519 iowr I[$r14] $r15 // HUB(0x86c) = val
520 mov $r14 -0x75ec
521 sethi $r14 0x400000
522 call #nv_wr32 // ROP(0xa14) = val
523 mov $r14 -0x5794
524 sethi $r14 0x410000
525 call #nv_wr32 // GPC(0x86c) = val
526 ret
527
528// ctx_load - load's a channel's ctxctl data, and selects its vm
529//
530// In: $r2 channel address
531//
532ctx_load:
533 trace_set(T_CHAN)
534
535 // switch to channel, somewhat magic in parts..
536 mov $r10 12 // DONE_UNK12
537 call #wait_donez
538 mov $r1 0xa24
539 shl b32 $r1 6
540 iowr I[$r1 + 0x000] $r0 // 0x409a24
541 mov $r3 0xb00
542 shl b32 $r3 6
543 iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
544 mov $r1 0xa0c
545 shl b32 $r1 6
546 mov $r4 7
547 iowr I[$r1 + 0x000] $r2 // MEM_CHAN
548 iowr I[$r1 + 0x100] $r4 // MEM_CMD
549 ctx_chan_wait_0:
550 iord $r4 I[$r1 + 0x100]
551 and $r4 0x1f
552 bra ne #ctx_chan_wait_0
553 iowr I[$r3 + 0x000] $r2 // CHAN_CUR
554
555 // load channel header, fetch PGRAPH context pointer
556 mov $xtargets $r0
557 bclr $r2 31
558 shl b32 $r2 4
559 add b32 $r2 2
560
561 trace_set(T_LCHAN)
562 mov $r1 0xa04
563 shl b32 $r1 6
564 iowr I[$r1 + 0x000] $r2 // MEM_BASE
565 mov $r1 0xa20
566 shl b32 $r1 6
567 mov $r2 0x0002
568 sethi $r2 0x80000000
569 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
570 mov $r1 0x10 // chan + 0x0210
571 mov $r2 #xfer_data
572 sethi $r2 0x00020000 // 16 bytes
573 xdld $r1 $r2
574 xdwait
575 trace_clr(T_LCHAN)
576
577 // update current context
578 ld b32 $r1 D[$r0 + #xfer_data + 4]
579 shl b32 $r1 24
580 ld b32 $r2 D[$r0 + #xfer_data + 0]
581 shr b32 $r2 8
582 or $r1 $r2
583 st b32 D[$r0 + #ctx_current] $r1
584
585 // set transfer base to start of context, and fetch context header
586 trace_set(T_LCTXH)
587 mov $r2 0xa04
588 shl b32 $r2 6
589 iowr I[$r2 + 0x000] $r1 // MEM_BASE
590 mov $r2 1
591 mov $r1 0xa20
592 shl b32 $r1 6
593 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
594 mov $r1 #chan_data
595 sethi $r1 0x00060000 // 256 bytes
596 xdld $r0 $r1
597 xdwait
598 trace_clr(T_LCTXH)
599
600 trace_clr(T_CHAN)
601 ret
602
603// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
604// the active channel for ctxctl, but not actually transfer
605// any context data. intended for use only during initial
606// context construction.
607//
608// In: $r2 channel address
609//
610ctx_chan:
611 call #ctx_load
612 mov $r10 12 // DONE_UNK12
613 call #wait_donez
614 mov $r1 0xa10
615 shl b32 $r1 6
616 mov $r2 5
617 iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
618 ctx_chan_wait:
619 iord $r2 I[$r1 + 0x000]
620 or $r2 $r2
621 bra ne #ctx_chan_wait
622 ret
623
624// Execute per-context state overrides list
625//
626// Only executed on the first load of a channel. Might want to look into
627// removing this and having the host directly modify the channel's context
628// to change this state... The nouveau DRM already builds this list as
629// it's definitely needed for NVIDIA's, so we may as well use it for now
630//
631// Input: $r1 mmio list length
632//
633ctx_mmio_exec:
634 // set transfer base to be the mmio list
635 ld b32 $r3 D[$r0 + #chan_mmio_address]
636 mov $r2 0xa04
637 shl b32 $r2 6
638 iowr I[$r2 + 0x000] $r3 // MEM_BASE
639
640 clear b32 $r3
641 ctx_mmio_loop:
642 // fetch next 256 bytes of mmio list if necessary
643 and $r4 $r3 0xff
644 bra ne #ctx_mmio_pull
645 mov $r5 #xfer_data
646 sethi $r5 0x00060000 // 256 bytes
647 xdld $r3 $r5
648 xdwait
649
650 // execute a single list entry
651 ctx_mmio_pull:
652 ld b32 $r14 D[$r4 + #xfer_data + 0x00]
653 ld b32 $r15 D[$r4 + #xfer_data + 0x04]
654 call #nv_wr32
655
656 // next!
657 add b32 $r3 8
658 sub b32 $r1 1
659 bra ne #ctx_mmio_loop
660
661 // set transfer base back to the current context
662 ctx_mmio_done:
663 ld b32 $r3 D[$r0 + #ctx_current]
664 iowr I[$r2 + 0x000] $r3 // MEM_BASE
665
666 // disable the mmio list now, we don't need/want to execute it again
667 st b32 D[$r0 + #chan_mmio_count] $r0
668 mov $r1 #chan_data
669 sethi $r1 0x00060000 // 256 bytes
670 xdst $r0 $r1
671 xdwait
672 ret
673
674// Transfer HUB context data between GPU and storage area
675//
676// In: $r2 channel address
677// $p1 clear on save, set on load
678// $p2 set if opposite direction done/will be done, so:
679// on save it means: "a load will follow this save"
680// on load it means: "a save preceeded this load"
681//
682ctx_xfer:
683 bra not $p1 #ctx_xfer_pre
684 bra $p2 #ctx_xfer_pre_load
685 ctx_xfer_pre:
686 mov $r15 0x10
687 call #ctx_86c
688 bra not $p1 #ctx_xfer_exec
689
690 ctx_xfer_pre_load:
691 mov $r15 2
692 call #ctx_4170s
693 call #ctx_4170w
694 call #ctx_redswitch
695 clear b32 $r15
696 call #ctx_4170s
697 call #ctx_load
698
699 // fetch context pointer, and initiate xfer on all GPCs
700 ctx_xfer_exec:
701 ld b32 $r1 D[$r0 + #ctx_current]
702 mov $r2 0x414
703 shl b32 $r2 6
704 iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
705 mov $r14 -0x5b00
706 sethi $r14 0x410000
707 mov b32 $r15 $r1
708 call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
709 add b32 $r14 4
710 xbit $r15 $flags $p1
711 xbit $r2 $flags $p2
712 shl b32 $r2 1
713 or $r15 $r2
714 call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
715
716 // strands
717 mov $r1 0x4afc
718 sethi $r1 0x20000
719 mov $r2 0xc
720 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
721 call #strand_wait
722 mov $r2 0x47fc
723 sethi $r2 0x20000
724 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
725 xbit $r2 $flags $p1
726 add b32 $r2 3
727 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
728
729 // mmio context
730 xbit $r10 $flags $p1 // direction
731 or $r10 6 // first, last
732 mov $r11 0 // base = 0
733 ld b32 $r12 D[$r0 + #hub_mmio_list_head]
734 ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
735 mov $r14 0 // not multi
736 call #mmctx_xfer
737
738 // wait for GPCs to all complete
739 mov $r10 8 // DONE_BAR
740 call #wait_doneo
741
742 // wait for strand xfer to complete
743 call #strand_wait
744
745 // post-op
746 bra $p1 #ctx_xfer_post
747 mov $r10 12 // DONE_UNK12
748 call #wait_donez
749 mov $r1 0xa10
750 shl b32 $r1 6
751 mov $r2 5
752 iowr I[$r1] $r2 // MEM_CMD
753 ctx_xfer_post_save_wait:
754 iord $r2 I[$r1]
755 or $r2 $r2
756 bra ne #ctx_xfer_post_save_wait
757
758 bra $p2 #ctx_xfer_done
759 ctx_xfer_post:
760 mov $r15 2
761 call #ctx_4170s
762 clear b32 $r15
763 call #ctx_86c
764 call #strand_post
765 call #ctx_4170w
766 clear b32 $r15
767 call #ctx_4170s
768
769 bra not $p1 #ctx_xfer_no_post_mmio
770 ld b32 $r1 D[$r0 + #chan_mmio_count]
771 or $r1 $r1
772 bra e #ctx_xfer_no_post_mmio
773 call #ctx_mmio_exec
774
775 ctx_xfer_no_post_mmio:
776
777 ctx_xfer_done:
778 ret
779
780.align 256
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
new file mode 100644
index 000000000000..decf0c60ca3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -0,0 +1,857 @@
1uint32_t nve0_grhub_data[] = {
2/* 0x0000: gpc_count */
3 0x00000000,
4/* 0x0004: rop_count */
5 0x00000000,
6/* 0x0008: cmd_queue */
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25/* 0x0050: hub_mmio_list_head */
26 0x00000000,
27/* 0x0054: hub_mmio_list_tail */
28 0x00000000,
29/* 0x0058: ctx_current */
30 0x00000000,
31/* 0x005c: chipsets */
32 0x000000e4,
33 0x013c0070,
34 0x000000e7,
35 0x013c0070,
36 0x00000000,
37/* 0x0070: nve4_hub_mmio_head */
38 0x0417e91c,
39 0x04400204,
40 0x18404010,
41 0x204040a8,
42 0x184040d0,
43 0x004040f8,
44 0x08404130,
45 0x08404150,
46 0x00404164,
47 0x0c4041a0,
48 0x0c404200,
49 0x34404404,
50 0x0c404460,
51 0x00404480,
52 0x00404498,
53 0x0c404604,
54 0x0c404618,
55 0x0440462c,
56 0x00404640,
57 0x00404654,
58 0x00404660,
59 0x48404678,
60 0x084046c8,
61 0x08404700,
62 0x24404718,
63 0x04404744,
64 0x00404754,
65 0x00405800,
66 0x08405830,
67 0x00405854,
68 0x0c405870,
69 0x04405a00,
70 0x00405a18,
71 0x00405b00,
72 0x00405b10,
73 0x00406020,
74 0x0c406028,
75 0x044064a8,
76 0x044064b4,
77 0x2c4064c0,
78 0x004064fc,
79 0x00407040,
80 0x00407804,
81 0x1440780c,
82 0x004078bc,
83 0x18408000,
84 0x00408064,
85 0x08408800,
86 0x00408840,
87 0x08408900,
88 0x00408980,
89/* 0x013c: nve4_hub_mmio_tail */
90 0x00000000,
91 0x00000000,
92 0x00000000,
93 0x00000000,
94 0x00000000,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x00000000,
115 0x00000000,
116 0x00000000,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136 0x00000000,
137 0x00000000,
138 0x00000000,
139/* 0x0200: chan_data */
140/* 0x0200: chan_mmio_count */
141 0x00000000,
142/* 0x0204: chan_mmio_address */
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174 0x00000000,
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204 0x00000000,
205 0x00000000,
206/* 0x0300: xfer_data */
207 0x00000000,
208};
209
210uint32_t nve0_grhub_code[] = {
211 0x03090ef5,
212/* 0x0004: queue_put */
213 0x9800d898,
214 0x86f001d9,
215 0x0489b808,
216 0xf00c1bf4,
217 0x21f502f7,
218 0x00f802ec,
219/* 0x001c: queue_put_next */
220 0xb60798c4,
221 0x8dbb0384,
222 0x0880b600,
223 0x80008e80,
224 0x90b6018f,
225 0x0f94f001,
226 0xf801d980,
227/* 0x0039: queue_get */
228 0x0131f400,
229 0x9800d898,
230 0x89b801d9,
231 0x210bf404,
232 0xb60789c4,
233 0x9dbb0394,
234 0x0890b600,
235 0x98009e98,
236 0x80b6019f,
237 0x0f84f001,
238 0xf400d880,
239/* 0x0066: queue_get_done */
240 0x00f80132,
241/* 0x0068: nv_rd32 */
242 0x0728b7f1,
243 0xb906b4b6,
244 0xc9f002ec,
245 0x00bcd01f,
246/* 0x0078: nv_rd32_wait */
247 0xc800bccf,
248 0x1bf41fcc,
249 0x06a7f0fa,
250 0x010321f5,
251 0xf840bfcf,
252/* 0x008d: nv_wr32 */
253 0x28b7f100,
254 0x06b4b607,
255 0xb980bfd0,
256 0xc9f002ec,
257 0x1ec9f01f,
258/* 0x00a3: nv_wr32_wait */
259 0xcf00bcd0,
260 0xccc800bc,
261 0xfa1bf41f,
262/* 0x00ae: watchdog_reset */
263 0x87f100f8,
264 0x84b60430,
265 0x1ff9f006,
266 0xf8008fd0,
267/* 0x00bd: watchdog_clear */
268 0x3087f100,
269 0x0684b604,
270 0xf80080d0,
271/* 0x00c9: wait_donez */
272 0x3c87f100,
273 0x0684b608,
274 0x99f094bd,
275 0x0089d000,
276 0x081887f1,
277 0xd00684b6,
278/* 0x00e2: wait_done_wait_donez */
279 0x87f1008a,
280 0x84b60400,
281 0x0088cf06,
282 0xf4888aff,
283 0x87f1f31b,
284 0x84b6085c,
285 0xf094bd06,
286 0x89d00099,
287/* 0x0103: wait_doneo */
288 0xf100f800,
289 0xb6083c87,
290 0x94bd0684,
291 0xd00099f0,
292 0x87f10089,
293 0x84b60818,
294 0x008ad006,
295/* 0x011c: wait_done_wait_doneo */
296 0x040087f1,
297 0xcf0684b6,
298 0x8aff0088,
299 0xf30bf488,
300 0x085c87f1,
301 0xbd0684b6,
302 0x0099f094,
303 0xf80089d0,
304/* 0x013d: mmctx_size */
305/* 0x013f: nv_mmctx_size_loop */
306 0x9894bd00,
307 0x85b600e8,
308 0x0180b61a,
309 0xbb0284b6,
310 0xe0b60098,
311 0x04efb804,
312 0xb9eb1bf4,
313 0x00f8029f,
314/* 0x015c: mmctx_xfer */
315 0x083c87f1,
316 0xbd0684b6,
317 0x0199f094,
318 0xf10089d0,
319 0xb6071087,
320 0x94bd0684,
321 0xf405bbfd,
322 0x8bd0090b,
323 0x0099f000,
324/* 0x0180: mmctx_base_disabled */
325 0xf405eefd,
326 0x8ed00c0b,
327 0xc08fd080,
328/* 0x018f: mmctx_multi_disabled */
329 0xb70199f0,
330 0xc8010080,
331 0xb4b600ab,
332 0x0cb9f010,
333 0xb601aec8,
334 0xbefd11e4,
335 0x008bd005,
336/* 0x01a8: mmctx_exec_loop */
337/* 0x01a8: mmctx_wait_free */
338 0xf0008ecf,
339 0x0bf41fe4,
340 0x00ce98fa,
341 0xd005e9fd,
342 0xc0b6c08e,
343 0x04cdb804,
344 0xc8e81bf4,
345 0x1bf402ab,
346/* 0x01c9: mmctx_fini_wait */
347 0x008bcf18,
348 0xb01fb4f0,
349 0x1bf410b4,
350 0x02a7f0f7,
351 0xf4c921f4,
352/* 0x01de: mmctx_stop */
353 0xabc81b0e,
354 0x10b4b600,
355 0xf00cb9f0,
356 0x8bd012b9,
357/* 0x01ed: mmctx_stop_wait */
358 0x008bcf00,
359 0xf412bbc8,
360/* 0x01f6: mmctx_done */
361 0x87f1fa1b,
362 0x84b6085c,
363 0xf094bd06,
364 0x89d00199,
365/* 0x0207: strand_wait */
366 0xf900f800,
367 0x02a7f0a0,
368 0xfcc921f4,
369/* 0x0213: strand_pre */
370 0xf100f8a0,
371 0xf04afc87,
372 0x97f00283,
373 0x0089d00c,
374 0x020721f5,
375/* 0x0226: strand_post */
376 0x87f100f8,
377 0x83f04afc,
378 0x0d97f002,
379 0xf50089d0,
380 0xf8020721,
381/* 0x0239: strand_set */
382 0xfca7f100,
383 0x02a3f04f,
384 0x0500aba2,
385 0xd00fc7f0,
386 0xc7f000ac,
387 0x00bcd00b,
388 0x020721f5,
389 0xf000aed0,
390 0xbcd00ac7,
391 0x0721f500,
392/* 0x0263: strand_ctx_init */
393 0xf100f802,
394 0xb6083c87,
395 0x94bd0684,
396 0xd00399f0,
397 0x21f50089,
398 0xe7f00213,
399 0x3921f503,
400 0xfca7f102,
401 0x02a3f046,
402 0x0400aba0,
403 0xf040a0d0,
404 0xbcd001c7,
405 0x0721f500,
406 0x010c9202,
407 0xf000acd0,
408 0xbcd002c7,
409 0x0721f500,
410 0x2621f502,
411 0x8087f102,
412 0x0684b608,
413 0xb70089cf,
414 0x95220080,
415/* 0x02ba: ctx_init_strand_loop */
416 0x8ed008fe,
417 0x408ed000,
418 0xb6808acf,
419 0xa0b606a5,
420 0x00eabb01,
421 0xb60480b6,
422 0x1bf40192,
423 0x08e4b6e8,
424 0xf1f2efbc,
425 0xb6085c87,
426 0x94bd0684,
427 0xd00399f0,
428 0x00f80089,
429/* 0x02ec: error */
430 0xe7f1e0f9,
431 0xe4b60814,
432 0x00efd006,
433 0x0c1ce7f1,
434 0xf006e4b6,
435 0xefd001f7,
436 0xf8e0fc00,
437/* 0x0309: init */
438 0xfe04bd00,
439 0x07fe0004,
440 0x0017f100,
441 0x0227f012,
442 0xf10012d0,
443 0xfe05b917,
444 0x17f10010,
445 0x10d00400,
446 0x0437f1c0,
447 0x0634b604,
448 0x200327f1,
449 0xf10032d0,
450 0xd0200427,
451 0x27f10132,
452 0x32d0200b,
453 0x0c27f102,
454 0x0732d020,
455 0x0c2427f1,
456 0xb90624b6,
457 0x23d00003,
458 0x0427f100,
459 0x0023f087,
460 0xb70012d0,
461 0xf0010012,
462 0x12d00427,
463 0x1031f400,
464 0x9604e7f1,
465 0xf440e3f0,
466 0xf1c76821,
467 0x01018090,
468 0x801ff4f0,
469 0x17f0000f,
470 0x041fbb01,
471 0xf10112b6,
472 0xb6040c27,
473 0x21d00624,
474 0x4021d000,
475 0x080027f1,
476 0xcf0624b6,
477 0xf7f00022,
478/* 0x03a9: init_find_chipset */
479 0x08f0b654,
480 0xb800f398,
481 0x0bf40432,
482 0x0034b00b,
483 0xf8f11bf4,
484/* 0x03bd: init_context */
485 0x0017f100,
486 0x02fe5801,
487 0xf003ff58,
488 0x0e8000e3,
489 0x150f8014,
490 0x013d21f5,
491 0x070037f1,
492 0x950634b6,
493 0x34d00814,
494 0x4034d000,
495 0x130030b7,
496 0xb6001fbb,
497 0x3fd002f5,
498 0x0815b600,
499 0xb60110b6,
500 0x1fb90814,
501 0x6321f502,
502 0x001fbb02,
503 0xf1000398,
504 0xf0200047,
505/* 0x040e: init_gpc */
506 0x4ea05043,
507 0x1fb90804,
508 0x8d21f402,
509 0x08004ea0,
510 0xf4022fb9,
511 0x4ea08d21,
512 0xf4bd010c,
513 0xa08d21f4,
514 0xf401044e,
515 0x4ea08d21,
516 0xf7f00100,
517 0x8d21f402,
518 0x08004ea0,
519/* 0x0440: init_gpc_wait */
520 0xc86821f4,
521 0x0bf41fff,
522 0x044ea0fa,
523 0x6821f408,
524 0xb7001fbb,
525 0xb6800040,
526 0x1bf40132,
527 0x0027f1b4,
528 0x0624b608,
529 0xb74021d0,
530 0xbd080020,
531 0x1f19f014,
532/* 0x0473: main */
533 0xf40021d0,
534 0x28f40031,
535 0x08d7f000,
536 0xf43921f4,
537 0xe4b1f401,
538 0x1bf54001,
539 0x87f100d1,
540 0x84b6083c,
541 0xf094bd06,
542 0x89d00499,
543 0x0017f100,
544 0x0614b60b,
545 0xcf4012cf,
546 0x13c80011,
547 0x7e0bf41f,
548 0xf41f23c8,
549 0x20f95a0b,
550 0xf10212b9,
551 0xb6083c87,
552 0x94bd0684,
553 0xd00799f0,
554 0x32f40089,
555 0x0231f401,
556 0x07fb21f5,
557 0x085c87f1,
558 0xbd0684b6,
559 0x0799f094,
560 0xfc0089d0,
561 0x3c87f120,
562 0x0684b608,
563 0x99f094bd,
564 0x0089d006,
565 0xf50131f4,
566 0xf107fb21,
567 0xb6085c87,
568 0x94bd0684,
569 0xd00699f0,
570 0x0ef40089,
571/* 0x0509: chsw_prev_no_next */
572 0xb920f931,
573 0x32f40212,
574 0x0232f401,
575 0x07fb21f5,
576 0x17f120fc,
577 0x14b60b00,
578 0x0012d006,
579/* 0x0527: chsw_no_prev */
580 0xc8130ef4,
581 0x0bf41f23,
582 0x0131f40d,
583 0xf50232f4,
584/* 0x0537: chsw_done */
585 0xf107fb21,
586 0xb60b0c17,
587 0x27f00614,
588 0x0012d001,
589 0x085c87f1,
590 0xbd0684b6,
591 0x0499f094,
592 0xf50089d0,
593/* 0x0557: main_not_ctx_switch */
594 0xb0ff200e,
595 0x1bf401e4,
596 0x02f2b90d,
597 0x078f21f5,
598/* 0x0567: main_not_ctx_chan */
599 0xb0420ef4,
600 0x1bf402e4,
601 0x3c87f12e,
602 0x0684b608,
603 0x99f094bd,
604 0x0089d007,
605 0xf40132f4,
606 0x21f50232,
607 0x87f107fb,
608 0x84b6085c,
609 0xf094bd06,
610 0x89d00799,
611 0x110ef400,
612/* 0x0598: main_not_ctx_save */
613 0xf010ef94,
614 0x21f501f5,
615 0x0ef502ec,
616/* 0x05a6: main_done */
617 0x17f1fed1,
618 0x14b60820,
619 0xf024bd06,
620 0x12d01f29,
621 0xbe0ef500,
622/* 0x05b9: ih */
623 0xfe80f9fe,
624 0x80f90188,
625 0xa0f990f9,
626 0xd0f9b0f9,
627 0xf0f9e0f9,
628 0xc4800acf,
629 0x0bf404ab,
630 0x00b7f11d,
631 0x08d7f019,
632 0xcf40becf,
633 0x21f400bf,
634 0x00b0b704,
635 0x01e7f004,
636/* 0x05ef: ih_no_fifo */
637 0xe400bed0,
638 0xf40100ab,
639 0xd7f00d0b,
640 0x01e7f108,
641 0x0421f440,
642/* 0x0600: ih_no_ctxsw */
643 0x0104b7f1,
644 0xabffb0bd,
645 0x0d0bf4b4,
646 0x0c1ca7f1,
647 0xd006a4b6,
648/* 0x0616: ih_no_other */
649 0x0ad000ab,
650 0xfcf0fc40,
651 0xfcd0fce0,
652 0xfca0fcb0,
653 0xfe80fc90,
654 0x80fc0088,
655 0xf80032f4,
656/* 0x0631: ctx_4170s */
657 0x70e7f101,
658 0x40e3f041,
659 0xf410f5f0,
660 0x00f88d21,
661/* 0x0640: ctx_4170w */
662 0x4170e7f1,
663 0xf440e3f0,
664 0xf4f06821,
665 0xf31bf410,
666/* 0x0652: ctx_redswitch */
667 0xe7f100f8,
668 0xe4b60614,
669 0x70f7f106,
670 0x00efd002,
671/* 0x0663: ctx_redswitch_delay */
672 0xb608f7f0,
673 0x1bf401f2,
674 0x70f7f1fd,
675 0x00efd007,
676/* 0x0672: ctx_86c */
677 0xe7f100f8,
678 0xe4b6086c,
679 0x00efd006,
680 0x8a14e7f1,
681 0xf440e3f0,
682 0xe7f18d21,
683 0xe3f0a86c,
684 0x8d21f441,
685/* 0x0692: ctx_load */
686 0x87f100f8,
687 0x84b6083c,
688 0xf094bd06,
689 0x89d00599,
690 0x0ca7f000,
691 0xf1c921f4,
692 0xb60a2417,
693 0x10d00614,
694 0x0037f100,
695 0x0634b60b,
696 0xf14032d0,
697 0xb60a0c17,
698 0x47f00614,
699 0x0012d007,
700/* 0x06cb: ctx_chan_wait_0 */
701 0xcf4014d0,
702 0x44f04014,
703 0xfa1bf41f,
704 0xfe0032d0,
705 0x2af0000b,
706 0x0424b61f,
707 0xf10220b6,
708 0xb6083c87,
709 0x94bd0684,
710 0xd00899f0,
711 0x17f10089,
712 0x14b60a04,
713 0x0012d006,
714 0x0a2017f1,
715 0xf00614b6,
716 0x23f10227,
717 0x12d08000,
718 0x1017f000,
719 0x030027f1,
720 0xfa0223f0,
721 0x03f80512,
722 0x085c87f1,
723 0xbd0684b6,
724 0x0899f094,
725 0x980089d0,
726 0x14b6c101,
727 0xc0029818,
728 0xfd0825b6,
729 0x01800512,
730 0x3c87f116,
731 0x0684b608,
732 0x99f094bd,
733 0x0089d009,
734 0x0a0427f1,
735 0xd00624b6,
736 0x27f00021,
737 0x2017f101,
738 0x0614b60a,
739 0xf10012d0,
740 0xf0020017,
741 0x01fa0613,
742 0xf103f805,
743 0xb6085c87,
744 0x94bd0684,
745 0xd00999f0,
746 0x87f10089,
747 0x84b6085c,
748 0xf094bd06,
749 0x89d00599,
750/* 0x078f: ctx_chan */
751 0xf500f800,
752 0xf0069221,
753 0x21f40ca7,
754 0x1017f1c9,
755 0x0614b60a,
756 0xd00527f0,
757/* 0x07a6: ctx_chan_wait */
758 0x12cf0012,
759 0x0522fd00,
760 0xf8fa1bf4,
761/* 0x07b1: ctx_mmio_exec */
762 0x81039800,
763 0x0a0427f1,
764 0xd00624b6,
765 0x34bd0023,
766/* 0x07c0: ctx_mmio_loop */
767 0xf4ff34c4,
768 0x57f10f1b,
769 0x53f00300,
770 0x0535fa06,
771/* 0x07d2: ctx_mmio_pull */
772 0x4e9803f8,
773 0xc14f98c0,
774 0xb68d21f4,
775 0x12b60830,
776 0xdf1bf401,
777/* 0x07e4: ctx_mmio_done */
778 0xd0160398,
779 0x00800023,
780 0x0017f180,
781 0x0613f002,
782 0xf80601fa,
783/* 0x07fb: ctx_xfer */
784 0xf400f803,
785 0x02f40611,
786/* 0x0801: ctx_xfer_pre */
787 0x10f7f00d,
788 0x067221f5,
789/* 0x080b: ctx_xfer_pre_load */
790 0xf01c11f4,
791 0x21f502f7,
792 0x21f50631,
793 0x21f50640,
794 0xf4bd0652,
795 0x063121f5,
796 0x069221f5,
797/* 0x0824: ctx_xfer_exec */
798 0xf1160198,
799 0xb6041427,
800 0x20d00624,
801 0x00e7f100,
802 0x41e3f0a5,
803 0xf4021fb9,
804 0xe0b68d21,
805 0x01fcf004,
806 0xb6022cf0,
807 0xf2fd0124,
808 0x8d21f405,
809 0x4afc17f1,
810 0xf00213f0,
811 0x12d00c27,
812 0x0721f500,
813 0xfc27f102,
814 0x0223f047,
815 0xf00020d0,
816 0x20b6012c,
817 0x0012d003,
818 0xf001acf0,
819 0xb7f006a5,
820 0x140c9800,
821 0xf0150d98,
822 0x21f500e7,
823 0xa7f0015c,
824 0x0321f508,
825 0x0721f501,
826 0x2201f402,
827 0xf40ca7f0,
828 0x17f1c921,
829 0x14b60a10,
830 0x0527f006,
831/* 0x08ab: ctx_xfer_post_save_wait */
832 0xcf0012d0,
833 0x22fd0012,
834 0xfa1bf405,
835/* 0x08b7: ctx_xfer_post */
836 0xf02e02f4,
837 0x21f502f7,
838 0xf4bd0631,
839 0x067221f5,
840 0x022621f5,
841 0x064021f5,
842 0x21f5f4bd,
843 0x11f40631,
844 0x80019810,
845 0xf40511fd,
846 0x21f5070b,
847/* 0x08e2: ctx_xfer_no_post_mmio */
848/* 0x08e2: ctx_xfer_done */
849 0x00f807b1,
850 0x00000000,
851 0x00000000,
852 0x00000000,
853 0x00000000,
854 0x00000000,
855 0x00000000,
856 0x00000000,
857};
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc
index e6b228844a32..e6b228844a32 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc
new file mode 100644
index 000000000000..f16a5d53319d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc
@@ -0,0 +1,400 @@
1/* fuc microcode util functions for nve0 PGRAPH
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
27define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
28
29ifdef(`include_code', `
30// Error codes
31define(`E_BAD_COMMAND', 0x01)
32define(`E_CMD_OVERFLOW', 0x02)
33
34// Util macros to help with debugging ucode hangs etc
35define(`T_WAIT', 0)
36define(`T_MMCTX', 1)
37define(`T_STRWAIT', 2)
38define(`T_STRINIT', 3)
39define(`T_AUTO', 4)
40define(`T_CHAN', 5)
41define(`T_LOAD', 6)
42define(`T_SAVE', 7)
43define(`T_LCHAN', 8)
44define(`T_LCTXH', 9)
45
46define(`trace_set', `
47 mov $r8 0x83c
48 shl b32 $r8 6
49 clear b32 $r9
50 bset $r9 $1
51 iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
52')
53
54define(`trace_clr', `
55 mov $r8 0x85c
56 shl b32 $r8 6
57 clear b32 $r9
58 bset $r9 $1
59 iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
60')
61
62// queue_put - add request to queue
63//
64// In : $r13 queue pointer
65// $r14 command
66// $r15 data
67//
68queue_put:
69 // make sure we have space..
70 ld b32 $r8 D[$r13 + 0x0] // GET
71 ld b32 $r9 D[$r13 + 0x4] // PUT
72 xor $r8 8
73 cmpu b32 $r8 $r9
74 bra ne #queue_put_next
75 mov $r15 E_CMD_OVERFLOW
76 call #error
77 ret
78
79 // store cmd/data on queue
80 queue_put_next:
81 and $r8 $r9 7
82 shl b32 $r8 3
83 add b32 $r8 $r13
84 add b32 $r8 8
85 st b32 D[$r8 + 0x0] $r14
86 st b32 D[$r8 + 0x4] $r15
87
88 // update PUT
89 add b32 $r9 1
90 and $r9 0xf
91 st b32 D[$r13 + 0x4] $r9
92 ret
93
94// queue_get - fetch request from queue
95//
96// In : $r13 queue pointer
97//
98// Out: $p1 clear on success (data available)
99// $r14 command
100// $r15 data
101//
102queue_get:
103 bset $flags $p1
104 ld b32 $r8 D[$r13 + 0x0] // GET
105 ld b32 $r9 D[$r13 + 0x4] // PUT
106 cmpu b32 $r8 $r9
107 bra e #queue_get_done
108 // fetch first cmd/data pair
109 and $r9 $r8 7
110 shl b32 $r9 3
111 add b32 $r9 $r13
112 add b32 $r9 8
113 ld b32 $r14 D[$r9 + 0x0]
114 ld b32 $r15 D[$r9 + 0x4]
115
116 // update GET
117 add b32 $r8 1
118 and $r8 0xf
119 st b32 D[$r13 + 0x0] $r8
120 bclr $flags $p1
121queue_get_done:
122 ret
123
124// nv_rd32 - read 32-bit value from nv register
125//
126// In : $r14 register
127// Out: $r15 value
128//
129nv_rd32:
130 mov $r11 0x728
131 shl b32 $r11 6
132 mov b32 $r12 $r14
133 bset $r12 31 // MMIO_CTRL_PENDING
134 iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
135 nv_rd32_wait:
136 iord $r12 I[$r11 + 0x000]
137 xbit $r12 $r12 31
138 bra ne #nv_rd32_wait
139 mov $r10 6 // DONE_MMIO_RD
140 call #wait_doneo
141 iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
142 ret
143
144// nv_wr32 - write 32-bit value to nv register
145//
146// In : $r14 register
147// $r15 value
148//
149nv_wr32:
150 mov $r11 0x728
151 shl b32 $r11 6
152 iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
153 mov b32 $r12 $r14
154 bset $r12 31 // MMIO_CTRL_PENDING
155 bset $r12 30 // MMIO_CTRL_WRITE
156 iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
157 nv_wr32_wait:
158 iord $r12 I[$r11 + 0x000]
159 xbit $r12 $r12 31
160 bra ne #nv_wr32_wait
161 ret
162
163// (re)set watchdog timer
164//
165// In : $r15 timeout
166//
167watchdog_reset:
168 mov $r8 0x430
169 shl b32 $r8 6
170 bset $r15 31
171 iowr I[$r8 + 0x000] $r15
172 ret
173
174// clear watchdog timer
175watchdog_clear:
176 mov $r8 0x430
177 shl b32 $r8 6
178 iowr I[$r8 + 0x000] $r0
179 ret
180
181// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
182//
183// In : $r10 bit to wait on
184//
185define(`wait_done', `
186$1:
187 trace_set(T_WAIT);
188 mov $r8 0x818
189 shl b32 $r8 6
190 iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
191 wait_done_$1:
192 mov $r8 0x400
193 shl b32 $r8 6
194 iord $r8 I[$r8 + 0x000] // DONE
195 xbit $r8 $r8 $r10
196 bra $2 #wait_done_$1
197 trace_clr(T_WAIT)
198 ret
199')
200wait_done(wait_donez, ne)
201wait_done(wait_doneo, e)
202
203// mmctx_size - determine size of a mmio list transfer
204//
205// In : $r14 mmio list head
206// $r15 mmio list tail
207// Out: $r15 transfer size (in bytes)
208//
209mmctx_size:
210 clear b32 $r9
211 nv_mmctx_size_loop:
212 ld b32 $r8 D[$r14]
213 shr b32 $r8 26
214 add b32 $r8 1
215 shl b32 $r8 2
216 add b32 $r9 $r8
217 add b32 $r14 4
218 cmpu b32 $r14 $r15
219 bra ne #nv_mmctx_size_loop
220 mov b32 $r15 $r9
221 ret
222
223// mmctx_xfer - execute a list of mmio transfers
224//
225// In : $r10 flags
226// bit 0: direction (0 = save, 1 = load)
227// bit 1: set if first transfer
228// bit 2: set if last transfer
229// $r11 base
230// $r12 mmio list head
231// $r13 mmio list tail
232// $r14 multi_stride
233// $r15 multi_mask
234//
235mmctx_xfer:
236 trace_set(T_MMCTX)
237 mov $r8 0x710
238 shl b32 $r8 6
239 clear b32 $r9
240 or $r11 $r11
241 bra e #mmctx_base_disabled
242 iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
243 bset $r9 0 // BASE_EN
244 mmctx_base_disabled:
245 or $r14 $r14
246 bra e #mmctx_multi_disabled
247 iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
248 iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
249 bset $r9 1 // MULTI_EN
250 mmctx_multi_disabled:
251 add b32 $r8 0x100
252
253 xbit $r11 $r10 0
254 shl b32 $r11 16 // DIR
255 bset $r11 12 // QLIMIT = 0x10
256 xbit $r14 $r10 1
257 shl b32 $r14 17
258 or $r11 $r14 // START_TRIGGER
259 iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
260
261 // loop over the mmio list, and send requests to the hw
262 mmctx_exec_loop:
263 // wait for space in mmctx queue
264 mmctx_wait_free:
265 iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
266 and $r14 0x1f
267 bra e #mmctx_wait_free
268
269 // queue up an entry
270 ld b32 $r14 D[$r12]
271 or $r14 $r9
272 iowr I[$r8 + 0x300] $r14
273 add b32 $r12 4
274 cmpu b32 $r12 $r13
275 bra ne #mmctx_exec_loop
276
277 xbit $r11 $r10 2
278 bra ne #mmctx_stop
279 // wait for queue to empty
280 mmctx_fini_wait:
281 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
282 and $r11 0x1f
283 cmpu b32 $r11 0x10
284 bra ne #mmctx_fini_wait
285 mov $r10 2 // DONE_MMCTX
286 call #wait_donez
287 bra #mmctx_done
288 mmctx_stop:
289 xbit $r11 $r10 0
290 shl b32 $r11 16 // DIR
291 bset $r11 12 // QLIMIT = 0x10
292 bset $r11 18 // STOP_TRIGGER
293 iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
294 mmctx_stop_wait:
295 // wait for STOP_TRIGGER to clear
296 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
297 xbit $r11 $r11 18
298 bra ne #mmctx_stop_wait
299 mmctx_done:
300 trace_clr(T_MMCTX)
301 ret
302
303// Wait for DONE_STRAND
304//
305strand_wait:
306 push $r10
307 mov $r10 2
308 call #wait_donez
309 pop $r10
310 ret
311
312// unknown - call before issuing strand commands
313//
314strand_pre:
315 mov $r8 0x4afc
316 sethi $r8 0x20000
317 mov $r9 0xc
318 iowr I[$r8] $r9
319 call #strand_wait
320 ret
321
322// unknown - call after issuing strand commands
323//
324strand_post:
325 mov $r8 0x4afc
326 sethi $r8 0x20000
327 mov $r9 0xd
328 iowr I[$r8] $r9
329 call #strand_wait
330 ret
331
332// Selects strand set?!
333//
334// In: $r14 id
335//
336strand_set:
337 mov $r10 0x4ffc
338 sethi $r10 0x20000
339 sub b32 $r11 $r10 0x500
340 mov $r12 0xf
341 iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
342 mov $r12 0xb
343 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
344 call #strand_wait
345 iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
346 mov $r12 0xa
347 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
348 call #strand_wait
349 ret
350
351// Initialise strand context data
352//
353// In : $r15 context base
354// Out: $r15 context size (in bytes)
355//
356// Strandset(?) 3 hardcoded currently
357//
358strand_ctx_init:
359 trace_set(T_STRINIT)
360 call #strand_pre
361 mov $r14 3
362 call #strand_set
363 mov $r10 0x46fc
364 sethi $r10 0x20000
365 add b32 $r11 $r10 0x400
366 iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
367 mov $r12 1
368 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
369 call #strand_wait
370 sub b32 $r12 $r0 1
371 iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
372 mov $r12 2
373 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
374 call #strand_wait
375 call #strand_post
376
377 // read the size of each strand, poke the context offset of
378 // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
379 // about it later then.
380 mov $r8 0x880
381 shl b32 $r8 6
382 iord $r9 I[$r8 + 0x000] // STRANDS
383 add b32 $r8 0x2200
384 shr b32 $r14 $r15 8
385 ctx_init_strand_loop:
386 iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
387 iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
388 iord $r10 I[$r8 + 0x200] // STRAND_SIZE
389 shr b32 $r10 6
390 add b32 $r10 1
391 add b32 $r14 $r10
392 add b32 $r8 4
393 sub b32 $r9 1
394 bra ne #ctx_init_strand_loop
395
396 shl b32 $r14 8
397 sub b32 $r15 $r14 $r15
398 trace_clr(T_STRINIT)
399 ret
400')
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
new file mode 100644
index 000000000000..618528248457
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -0,0 +1,1387 @@
1/*
2 * Copyright 2007 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/handle.h>
28#include <core/namedb.h>
29
30#include <subdev/fb.h>
31#include <subdev/instmem.h>
32#include <subdev/timer.h>
33
34#include <engine/fifo.h>
35#include <engine/graph.h>
36
37#include "regs.h"
38
39static u32
40nv04_graph_ctx_regs[] = {
41 0x0040053c,
42 0x00400544,
43 0x00400540,
44 0x00400548,
45 NV04_PGRAPH_CTX_SWITCH1,
46 NV04_PGRAPH_CTX_SWITCH2,
47 NV04_PGRAPH_CTX_SWITCH3,
48 NV04_PGRAPH_CTX_SWITCH4,
49 NV04_PGRAPH_CTX_CACHE1,
50 NV04_PGRAPH_CTX_CACHE2,
51 NV04_PGRAPH_CTX_CACHE3,
52 NV04_PGRAPH_CTX_CACHE4,
53 0x00400184,
54 0x004001a4,
55 0x004001c4,
56 0x004001e4,
57 0x00400188,
58 0x004001a8,
59 0x004001c8,
60 0x004001e8,
61 0x0040018c,
62 0x004001ac,
63 0x004001cc,
64 0x004001ec,
65 0x00400190,
66 0x004001b0,
67 0x004001d0,
68 0x004001f0,
69 0x00400194,
70 0x004001b4,
71 0x004001d4,
72 0x004001f4,
73 0x00400198,
74 0x004001b8,
75 0x004001d8,
76 0x004001f8,
77 0x0040019c,
78 0x004001bc,
79 0x004001dc,
80 0x004001fc,
81 0x00400174,
82 NV04_PGRAPH_DMA_START_0,
83 NV04_PGRAPH_DMA_START_1,
84 NV04_PGRAPH_DMA_LENGTH,
85 NV04_PGRAPH_DMA_MISC,
86 NV04_PGRAPH_DMA_PITCH,
87 NV04_PGRAPH_BOFFSET0,
88 NV04_PGRAPH_BBASE0,
89 NV04_PGRAPH_BLIMIT0,
90 NV04_PGRAPH_BOFFSET1,
91 NV04_PGRAPH_BBASE1,
92 NV04_PGRAPH_BLIMIT1,
93 NV04_PGRAPH_BOFFSET2,
94 NV04_PGRAPH_BBASE2,
95 NV04_PGRAPH_BLIMIT2,
96 NV04_PGRAPH_BOFFSET3,
97 NV04_PGRAPH_BBASE3,
98 NV04_PGRAPH_BLIMIT3,
99 NV04_PGRAPH_BOFFSET4,
100 NV04_PGRAPH_BBASE4,
101 NV04_PGRAPH_BLIMIT4,
102 NV04_PGRAPH_BOFFSET5,
103 NV04_PGRAPH_BBASE5,
104 NV04_PGRAPH_BLIMIT5,
105 NV04_PGRAPH_BPITCH0,
106 NV04_PGRAPH_BPITCH1,
107 NV04_PGRAPH_BPITCH2,
108 NV04_PGRAPH_BPITCH3,
109 NV04_PGRAPH_BPITCH4,
110 NV04_PGRAPH_SURFACE,
111 NV04_PGRAPH_STATE,
112 NV04_PGRAPH_BSWIZZLE2,
113 NV04_PGRAPH_BSWIZZLE5,
114 NV04_PGRAPH_BPIXEL,
115 NV04_PGRAPH_NOTIFY,
116 NV04_PGRAPH_PATT_COLOR0,
117 NV04_PGRAPH_PATT_COLOR1,
118 NV04_PGRAPH_PATT_COLORRAM+0x00,
119 NV04_PGRAPH_PATT_COLORRAM+0x04,
120 NV04_PGRAPH_PATT_COLORRAM+0x08,
121 NV04_PGRAPH_PATT_COLORRAM+0x0c,
122 NV04_PGRAPH_PATT_COLORRAM+0x10,
123 NV04_PGRAPH_PATT_COLORRAM+0x14,
124 NV04_PGRAPH_PATT_COLORRAM+0x18,
125 NV04_PGRAPH_PATT_COLORRAM+0x1c,
126 NV04_PGRAPH_PATT_COLORRAM+0x20,
127 NV04_PGRAPH_PATT_COLORRAM+0x24,
128 NV04_PGRAPH_PATT_COLORRAM+0x28,
129 NV04_PGRAPH_PATT_COLORRAM+0x2c,
130 NV04_PGRAPH_PATT_COLORRAM+0x30,
131 NV04_PGRAPH_PATT_COLORRAM+0x34,
132 NV04_PGRAPH_PATT_COLORRAM+0x38,
133 NV04_PGRAPH_PATT_COLORRAM+0x3c,
134 NV04_PGRAPH_PATT_COLORRAM+0x40,
135 NV04_PGRAPH_PATT_COLORRAM+0x44,
136 NV04_PGRAPH_PATT_COLORRAM+0x48,
137 NV04_PGRAPH_PATT_COLORRAM+0x4c,
138 NV04_PGRAPH_PATT_COLORRAM+0x50,
139 NV04_PGRAPH_PATT_COLORRAM+0x54,
140 NV04_PGRAPH_PATT_COLORRAM+0x58,
141 NV04_PGRAPH_PATT_COLORRAM+0x5c,
142 NV04_PGRAPH_PATT_COLORRAM+0x60,
143 NV04_PGRAPH_PATT_COLORRAM+0x64,
144 NV04_PGRAPH_PATT_COLORRAM+0x68,
145 NV04_PGRAPH_PATT_COLORRAM+0x6c,
146 NV04_PGRAPH_PATT_COLORRAM+0x70,
147 NV04_PGRAPH_PATT_COLORRAM+0x74,
148 NV04_PGRAPH_PATT_COLORRAM+0x78,
149 NV04_PGRAPH_PATT_COLORRAM+0x7c,
150 NV04_PGRAPH_PATT_COLORRAM+0x80,
151 NV04_PGRAPH_PATT_COLORRAM+0x84,
152 NV04_PGRAPH_PATT_COLORRAM+0x88,
153 NV04_PGRAPH_PATT_COLORRAM+0x8c,
154 NV04_PGRAPH_PATT_COLORRAM+0x90,
155 NV04_PGRAPH_PATT_COLORRAM+0x94,
156 NV04_PGRAPH_PATT_COLORRAM+0x98,
157 NV04_PGRAPH_PATT_COLORRAM+0x9c,
158 NV04_PGRAPH_PATT_COLORRAM+0xa0,
159 NV04_PGRAPH_PATT_COLORRAM+0xa4,
160 NV04_PGRAPH_PATT_COLORRAM+0xa8,
161 NV04_PGRAPH_PATT_COLORRAM+0xac,
162 NV04_PGRAPH_PATT_COLORRAM+0xb0,
163 NV04_PGRAPH_PATT_COLORRAM+0xb4,
164 NV04_PGRAPH_PATT_COLORRAM+0xb8,
165 NV04_PGRAPH_PATT_COLORRAM+0xbc,
166 NV04_PGRAPH_PATT_COLORRAM+0xc0,
167 NV04_PGRAPH_PATT_COLORRAM+0xc4,
168 NV04_PGRAPH_PATT_COLORRAM+0xc8,
169 NV04_PGRAPH_PATT_COLORRAM+0xcc,
170 NV04_PGRAPH_PATT_COLORRAM+0xd0,
171 NV04_PGRAPH_PATT_COLORRAM+0xd4,
172 NV04_PGRAPH_PATT_COLORRAM+0xd8,
173 NV04_PGRAPH_PATT_COLORRAM+0xdc,
174 NV04_PGRAPH_PATT_COLORRAM+0xe0,
175 NV04_PGRAPH_PATT_COLORRAM+0xe4,
176 NV04_PGRAPH_PATT_COLORRAM+0xe8,
177 NV04_PGRAPH_PATT_COLORRAM+0xec,
178 NV04_PGRAPH_PATT_COLORRAM+0xf0,
179 NV04_PGRAPH_PATT_COLORRAM+0xf4,
180 NV04_PGRAPH_PATT_COLORRAM+0xf8,
181 NV04_PGRAPH_PATT_COLORRAM+0xfc,
182 NV04_PGRAPH_PATTERN,
183 0x0040080c,
184 NV04_PGRAPH_PATTERN_SHAPE,
185 0x00400600,
186 NV04_PGRAPH_ROP3,
187 NV04_PGRAPH_CHROMA,
188 NV04_PGRAPH_BETA_AND,
189 NV04_PGRAPH_BETA_PREMULT,
190 NV04_PGRAPH_CONTROL0,
191 NV04_PGRAPH_CONTROL1,
192 NV04_PGRAPH_CONTROL2,
193 NV04_PGRAPH_BLEND,
194 NV04_PGRAPH_STORED_FMT,
195 NV04_PGRAPH_SOURCE_COLOR,
196 0x00400560,
197 0x00400568,
198 0x00400564,
199 0x0040056c,
200 0x00400400,
201 0x00400480,
202 0x00400404,
203 0x00400484,
204 0x00400408,
205 0x00400488,
206 0x0040040c,
207 0x0040048c,
208 0x00400410,
209 0x00400490,
210 0x00400414,
211 0x00400494,
212 0x00400418,
213 0x00400498,
214 0x0040041c,
215 0x0040049c,
216 0x00400420,
217 0x004004a0,
218 0x00400424,
219 0x004004a4,
220 0x00400428,
221 0x004004a8,
222 0x0040042c,
223 0x004004ac,
224 0x00400430,
225 0x004004b0,
226 0x00400434,
227 0x004004b4,
228 0x00400438,
229 0x004004b8,
230 0x0040043c,
231 0x004004bc,
232 0x00400440,
233 0x004004c0,
234 0x00400444,
235 0x004004c4,
236 0x00400448,
237 0x004004c8,
238 0x0040044c,
239 0x004004cc,
240 0x00400450,
241 0x004004d0,
242 0x00400454,
243 0x004004d4,
244 0x00400458,
245 0x004004d8,
246 0x0040045c,
247 0x004004dc,
248 0x00400460,
249 0x004004e0,
250 0x00400464,
251 0x004004e4,
252 0x00400468,
253 0x004004e8,
254 0x0040046c,
255 0x004004ec,
256 0x00400470,
257 0x004004f0,
258 0x00400474,
259 0x004004f4,
260 0x00400478,
261 0x004004f8,
262 0x0040047c,
263 0x004004fc,
264 0x00400534,
265 0x00400538,
266 0x00400514,
267 0x00400518,
268 0x0040051c,
269 0x00400520,
270 0x00400524,
271 0x00400528,
272 0x0040052c,
273 0x00400530,
274 0x00400d00,
275 0x00400d40,
276 0x00400d80,
277 0x00400d04,
278 0x00400d44,
279 0x00400d84,
280 0x00400d08,
281 0x00400d48,
282 0x00400d88,
283 0x00400d0c,
284 0x00400d4c,
285 0x00400d8c,
286 0x00400d10,
287 0x00400d50,
288 0x00400d90,
289 0x00400d14,
290 0x00400d54,
291 0x00400d94,
292 0x00400d18,
293 0x00400d58,
294 0x00400d98,
295 0x00400d1c,
296 0x00400d5c,
297 0x00400d9c,
298 0x00400d20,
299 0x00400d60,
300 0x00400da0,
301 0x00400d24,
302 0x00400d64,
303 0x00400da4,
304 0x00400d28,
305 0x00400d68,
306 0x00400da8,
307 0x00400d2c,
308 0x00400d6c,
309 0x00400dac,
310 0x00400d30,
311 0x00400d70,
312 0x00400db0,
313 0x00400d34,
314 0x00400d74,
315 0x00400db4,
316 0x00400d38,
317 0x00400d78,
318 0x00400db8,
319 0x00400d3c,
320 0x00400d7c,
321 0x00400dbc,
322 0x00400590,
323 0x00400594,
324 0x00400598,
325 0x0040059c,
326 0x004005a8,
327 0x004005ac,
328 0x004005b0,
329 0x004005b4,
330 0x004005c0,
331 0x004005c4,
332 0x004005c8,
333 0x004005cc,
334 0x004005d0,
335 0x004005d4,
336 0x004005d8,
337 0x004005dc,
338 0x004005e0,
339 NV04_PGRAPH_PASSTHRU_0,
340 NV04_PGRAPH_PASSTHRU_1,
341 NV04_PGRAPH_PASSTHRU_2,
342 NV04_PGRAPH_DVD_COLORFMT,
343 NV04_PGRAPH_SCALED_FORMAT,
344 NV04_PGRAPH_MISC24_0,
345 NV04_PGRAPH_MISC24_1,
346 NV04_PGRAPH_MISC24_2,
347 0x00400500,
348 0x00400504,
349 NV04_PGRAPH_VALID1,
350 NV04_PGRAPH_VALID2,
351 NV04_PGRAPH_DEBUG_3
352};
353
/* Per-device PGRAPH engine state for NV04/NV05. */
struct nv04_graph_priv {
	struct nouveau_graph base;
	struct nv04_graph_chan *chan[16];	/* software contexts, indexed by fifo channel id */
	spinlock_t lock;			/* protects chan[] and hw context load/unload */
};
359
/* Software copy of one channel's PGRAPH context. */
struct nv04_graph_chan {
	struct nouveau_object base;
	int chid;	/* fifo channel id this context belongs to */
	/* saved values for each register in nv04_graph_ctx_regs[] */
	u32 nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
};
365
366
/* Fetch the owning PGRAPH engine from a channel context object. */
static inline struct nv04_graph_priv *
nv04_graph_priv(struct nv04_graph_chan *chan)
{
	return (void *)nv_object(chan)->engine;
}
372
373/*******************************************************************************
374 * Graphics object classes
375 ******************************************************************************/
376
377/*
378 * Software methods, why they are needed, and how they all work:
379 *
380 * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
381 * 2d engine settings are kept inside the grobjs themselves. The grobjs are
382 * 3 words long on both. grobj format on NV04 is:
383 *
384 * word 0:
385 * - bits 0-7: class
386 * - bit 12: color key active
387 * - bit 13: clip rect active
388 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
389 * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
390 * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
391 * NV03_CONTEXT_SURFACE_DST].
392 * - bits 15-17: 2d operation [aka patch config]
393 * - bit 24: patch valid [enables rendering using this object]
394 * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
395 * word 1:
396 * - bits 0-1: mono format
397 * - bits 8-13: color format
398 * - bits 16-31: DMA_NOTIFY instance
399 * word 2:
400 * - bits 0-15: DMA_A instance
401 * - bits 16-31: DMA_B instance
402 *
403 * On NV05 it's:
404 *
405 * word 0:
406 * - bits 0-7: class
407 * - bit 12: color key active
408 * - bit 13: clip rect active
409 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
410 * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
411 * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
412 * NV03_CONTEXT_SURFACE_DST].
413 * - bits 15-17: 2d operation [aka patch config]
414 * - bits 20-22: dither mode
415 * - bit 24: patch valid [enables rendering using this object]
416 * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
417 * - bit 26: surface_src/surface_zeta valid
418 * - bit 27: pattern valid
419 * - bit 28: rop valid
420 * - bit 29: beta1 valid
421 * - bit 30: beta4 valid
422 * word 1:
423 * - bits 0-1: mono format
424 * - bits 8-13: color format
425 * - bits 16-31: DMA_NOTIFY instance
426 * word 2:
427 * - bits 0-15: DMA_A instance
428 * - bits 16-31: DMA_B instance
429 *
430 * NV05 will set/unset the relevant valid bits when you poke the relevant
431 * object-binding methods with object of the proper type, or with the NULL
432 * type. It'll only allow rendering using the grobj if all needed objects
433 * are bound. The needed set of objects depends on selected operation: for
434 * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
435 *
436 * NV04 doesn't have these methods implemented at all, and doesn't have the
437 * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
438 * is set. So we have to emulate them in software, internally keeping the
439 * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
440 * but the last word isn't actually used for anything, we abuse it for this
441 * purpose.
442 *
443 * Actually, NV05 can optionally check bit 24 too, but we disable this since
444 * there's no use for it.
445 *
446 * For unknown reasons, NV04 implements surf3d binding in hardware as an
447 * exception. Also for unknown reasons, NV04 doesn't implement the clipping
448 * methods on the surf3d object, so we have to emulate them too.
449 */
450
/*
 * Update bits of a grobj's context word 0 and mirror the new value into
 * the hardware context switch/cache registers for the subchannel the
 * trapped method arrived on, so the change takes effect immediately.
 */
static void
nv04_graph_set_ctx1(struct nouveau_object *object, u32 mask, u32 value)
{
	struct nv04_graph_priv *priv = (void *)object->engine;
	/* subchannel of the currently trapped method (bits 13-15) */
	int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
	u32 tmp;

	tmp = nv_ro32(object, 0x00);
	tmp &= ~mask;
	tmp |= value;
	nv_wo32(object, 0x00, tmp);

	nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp);
	nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
}
466
/*
 * Update the software object-valid bits kept in the (otherwise unused)
 * fourth grobj word, then recompute whether every object required by the
 * grobj's 2d operation is bound, and mirror the result into the hardware
 * "patch valid" bit (ctx1 bit 24).  See the large comment above.
 */
static void
nv04_graph_set_ctx_val(struct nouveau_object *object, u32 mask, u32 value)
{
	int class, op, valid = 1;
	u32 tmp, ctx1;

	ctx1 = nv_ro32(object, 0x00);
	class = ctx1 & 0xff;
	op = (ctx1 >> 15) & 7;	/* 2d operation [aka patch config] */

	tmp = nv_ro32(object, 0x0c);
	tmp &= ~mask;
	tmp |= value;
	nv_wo32(object, 0x0c, tmp);

	/* check for valid surf2d/surf_dst/surf_color */
	if (!(tmp & 0x02000000))
		valid = 0;
	/* check for valid surf_src/surf_zeta */
	/* (only blit [0x1f] and tex_tri [0x48] read a second surface) */
	if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
		valid = 0;

	switch (op) {
	/* SRCCOPY_AND, SRCCOPY: no extra objects required */
	case 0:
	case 3:
		break;
	/* ROP_AND: requires pattern and rop */
	case 1:
		if (!(tmp & 0x18000000))
			valid = 0;
		break;
	/* BLEND_AND: requires beta1 */
	case 2:
		if (!(tmp & 0x20000000))
			valid = 0;
		break;
	/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
	case 4:
	case 5:
		if (!(tmp & 0x40000000))
			valid = 0;
		break;
	}

	nv04_graph_set_ctx1(object, 0x01000000, valid << 24);
}
514
/*
 * SET_OPERATION software method: store the requested 2d operation in
 * ctx1 bits 15-17 and re-evaluate the patch-valid state.  Returns
 * non-zero for operations the object's class doesn't support.
 */
static int
nv04_graph_mthd_set_operation(struct nouveau_object *object, u32 mthd,
			      void *args, u32 size)
{
	u32 class = nv_ro32(object, 0) & 0xff;
	u32 data = *(u32 *)args;
	if (data > 5)
		return 1;
	/* Old versions of the objects only accept first three operations. */
	if (data > 2 && class < 0x40)
		return 1;
	nv04_graph_set_ctx1(object, 0x00038000, data << 15);
	/* changing operation changes set of objects needed for validation */
	nv04_graph_set_ctx_val(object, 0, 0);
	return 0;
}
531
532static int
533nv04_graph_mthd_surf3d_clip_h(struct nouveau_object *object, u32 mthd,
534 void *args, u32 size)
535{
536 struct nv04_graph_priv *priv = (void *)object->engine;
537 u32 data = *(u32 *)args;
538 u32 min = data & 0xffff, max;
539 u32 w = data >> 16;
540 if (min & 0x8000)
541 /* too large */
542 return 1;
543 if (w & 0x8000)
544 /* yes, it accepts negative for some reason. */
545 w |= 0xffff0000;
546 max = min + w;
547 max &= 0x3ffff;
548 nv_wr32(priv, 0x40053c, min);
549 nv_wr32(priv, 0x400544, max);
550 return 0;
551}
552
553static int
554nv04_graph_mthd_surf3d_clip_v(struct nouveau_object *object, u32 mthd,
555 void *args, u32 size)
556{
557 struct nv04_graph_priv *priv = (void *)object->engine;
558 u32 data = *(u32 *)args;
559 u32 min = data & 0xffff, max;
560 u32 w = data >> 16;
561 if (min & 0x8000)
562 /* too large */
563 return 1;
564 if (w & 0x8000)
565 /* yes, it accepts negative for some reason. */
566 w |= 0xffff0000;
567 max = min + w;
568 max &= 0x3ffff;
569 nv_wr32(priv, 0x400540, min);
570 nv_wr32(priv, 0x400548, max);
571 return 0;
572}
573
/*
 * For a bind method's argument (an object handle), look up the bound
 * object's instance in instance memory (handle << 4) and return the low
 * 16 bits of its first context word, whose low byte is the class id.
 */
static u16
nv04_graph_mthd_bind_class(struct nouveau_object *object, u32 *args, u32 size)
{
	struct nouveau_instmem *imem = nouveau_instmem(object);
	u32 inst = *(u32 *)args << 4;
	return nv_ro32(imem, inst);
}
581
582static int
583nv04_graph_mthd_bind_surf2d(struct nouveau_object *object, u32 mthd,
584 void *args, u32 size)
585{
586 switch (nv04_graph_mthd_bind_class(object, args, size)) {
587 case 0x30:
588 nv04_graph_set_ctx1(object, 0x00004000, 0);
589 nv04_graph_set_ctx_val(object, 0x02000000, 0);
590 return 0;
591 case 0x42:
592 nv04_graph_set_ctx1(object, 0x00004000, 0);
593 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
594 return 0;
595 }
596 return 1;
597}
598
599static int
600nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_object *object, u32 mthd,
601 void *args, u32 size)
602{
603 switch (nv04_graph_mthd_bind_class(object, args, size)) {
604 case 0x30:
605 nv04_graph_set_ctx1(object, 0x00004000, 0);
606 nv04_graph_set_ctx_val(object, 0x02000000, 0);
607 return 0;
608 case 0x42:
609 nv04_graph_set_ctx1(object, 0x00004000, 0);
610 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
611 return 0;
612 case 0x52:
613 nv04_graph_set_ctx1(object, 0x00004000, 0x00004000);
614 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
615 return 0;
616 }
617 return 1;
618}
619
620static int
621nv01_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
622 void *args, u32 size)
623{
624 switch (nv04_graph_mthd_bind_class(object, args, size)) {
625 case 0x30:
626 nv04_graph_set_ctx_val(object, 0x08000000, 0);
627 return 0;
628 case 0x18:
629 nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
630 return 0;
631 }
632 return 1;
633}
634
635static int
636nv04_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
637 void *args, u32 size)
638{
639 switch (nv04_graph_mthd_bind_class(object, args, size)) {
640 case 0x30:
641 nv04_graph_set_ctx_val(object, 0x08000000, 0);
642 return 0;
643 case 0x44:
644 nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
645 return 0;
646 }
647 return 1;
648}
649
650static int
651nv04_graph_mthd_bind_rop(struct nouveau_object *object, u32 mthd,
652 void *args, u32 size)
653{
654 switch (nv04_graph_mthd_bind_class(object, args, size)) {
655 case 0x30:
656 nv04_graph_set_ctx_val(object, 0x10000000, 0);
657 return 0;
658 case 0x43:
659 nv04_graph_set_ctx_val(object, 0x10000000, 0x10000000);
660 return 0;
661 }
662 return 1;
663}
664
665static int
666nv04_graph_mthd_bind_beta1(struct nouveau_object *object, u32 mthd,
667 void *args, u32 size)
668{
669 switch (nv04_graph_mthd_bind_class(object, args, size)) {
670 case 0x30:
671 nv04_graph_set_ctx_val(object, 0x20000000, 0);
672 return 0;
673 case 0x12:
674 nv04_graph_set_ctx_val(object, 0x20000000, 0x20000000);
675 return 0;
676 }
677 return 1;
678}
679
680static int
681nv04_graph_mthd_bind_beta4(struct nouveau_object *object, u32 mthd,
682 void *args, u32 size)
683{
684 switch (nv04_graph_mthd_bind_class(object, args, size)) {
685 case 0x30:
686 nv04_graph_set_ctx_val(object, 0x40000000, 0);
687 return 0;
688 case 0x72:
689 nv04_graph_set_ctx_val(object, 0x40000000, 0x40000000);
690 return 0;
691 }
692 return 1;
693}
694
695static int
696nv04_graph_mthd_bind_surf_dst(struct nouveau_object *object, u32 mthd,
697 void *args, u32 size)
698{
699 switch (nv04_graph_mthd_bind_class(object, args, size)) {
700 case 0x30:
701 nv04_graph_set_ctx_val(object, 0x02000000, 0);
702 return 0;
703 case 0x58:
704 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
705 return 0;
706 }
707 return 1;
708}
709
710static int
711nv04_graph_mthd_bind_surf_src(struct nouveau_object *object, u32 mthd,
712 void *args, u32 size)
713{
714 switch (nv04_graph_mthd_bind_class(object, args, size)) {
715 case 0x30:
716 nv04_graph_set_ctx_val(object, 0x04000000, 0);
717 return 0;
718 case 0x59:
719 nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
720 return 0;
721 }
722 return 1;
723}
724
725static int
726nv04_graph_mthd_bind_surf_color(struct nouveau_object *object, u32 mthd,
727 void *args, u32 size)
728{
729 switch (nv04_graph_mthd_bind_class(object, args, size)) {
730 case 0x30:
731 nv04_graph_set_ctx_val(object, 0x02000000, 0);
732 return 0;
733 case 0x5a:
734 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
735 return 0;
736 }
737 return 1;
738}
739
740static int
741nv04_graph_mthd_bind_surf_zeta(struct nouveau_object *object, u32 mthd,
742 void *args, u32 size)
743{
744 switch (nv04_graph_mthd_bind_class(object, args, size)) {
745 case 0x30:
746 nv04_graph_set_ctx_val(object, 0x04000000, 0);
747 return 0;
748 case 0x5b:
749 nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
750 return 0;
751 }
752 return 1;
753}
754
755static int
756nv01_graph_mthd_bind_clip(struct nouveau_object *object, u32 mthd,
757 void *args, u32 size)
758{
759 switch (nv04_graph_mthd_bind_class(object, args, size)) {
760 case 0x30:
761 nv04_graph_set_ctx1(object, 0x2000, 0);
762 return 0;
763 case 0x19:
764 nv04_graph_set_ctx1(object, 0x2000, 0x2000);
765 return 0;
766 }
767 return 1;
768}
769
/* Bind/unbind the chroma (color key) object: toggles the color-key-
 * active flag in ctx1 (bit 12). */
static int
nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
			    void *args, u32 size)
{
	switch (nv04_graph_mthd_bind_class(object, args, size)) {
	case 0x30:
		nv04_graph_set_ctx1(object, 0x1000, 0);
		return 0;
	/* Yes, for some reason even the old versions of objects
	 * accept 0x57 and not 0x17. Consistency be damned.
	 */
	case 0x57:
		nv04_graph_set_ctx1(object, 0x1000, 0x1000);
		return 0;
	}
	return 1;
}
787
/* Software methods for the nv03-style gdi object (class 0x4b). */
static struct nouveau_omthds
nv03_graph_gdi_omthds[] = {
	{ 0x0184, nv01_graph_mthd_bind_patt },
	{ 0x0188, nv04_graph_mthd_bind_rop },
	{ 0x018c, nv04_graph_mthd_bind_beta1 },
	{ 0x0190, nv04_graph_mthd_bind_surf_dst },
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{}
};
797
/* Software methods for the nv04-style gdi object (class 0x4a). */
static struct nouveau_omthds
nv04_graph_gdi_omthds[] = {
	{ 0x0188, nv04_graph_mthd_bind_patt },
	{ 0x018c, nv04_graph_mthd_bind_rop },
	{ 0x0190, nv04_graph_mthd_bind_beta1 },
	{ 0x0194, nv04_graph_mthd_bind_beta4 },
	{ 0x0198, nv04_graph_mthd_bind_surf2d },
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{}
};
808
/* Software methods for the nv01-style blit object (class 0x1f). */
static struct nouveau_omthds
nv01_graph_blit_omthds[] = {
	{ 0x0184, nv01_graph_mthd_bind_chroma },
	{ 0x0188, nv01_graph_mthd_bind_clip },
	{ 0x018c, nv01_graph_mthd_bind_patt },
	{ 0x0190, nv04_graph_mthd_bind_rop },
	{ 0x0194, nv04_graph_mthd_bind_beta1 },
	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
	{ 0x019c, nv04_graph_mthd_bind_surf_src },
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{}
};
821
/* Software methods for the nv04-style blit object (class 0x5f). */
static struct nouveau_omthds
nv04_graph_blit_omthds[] = {
	{ 0x0184, nv01_graph_mthd_bind_chroma },
	{ 0x0188, nv01_graph_mthd_bind_clip },
	{ 0x018c, nv04_graph_mthd_bind_patt },
	{ 0x0190, nv04_graph_mthd_bind_rop },
	{ 0x0194, nv04_graph_mthd_bind_beta1 },
	{ 0x0198, nv04_graph_mthd_bind_beta4 },
	{ 0x019c, nv04_graph_mthd_bind_surf2d },
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{}
};
834
/* Software methods for the nv04 indexed image-from-cpu object (0x60). */
static struct nouveau_omthds
nv04_graph_iifc_omthds[] = {
	{ 0x0188, nv01_graph_mthd_bind_chroma },
	{ 0x018c, nv01_graph_mthd_bind_clip },
	{ 0x0190, nv04_graph_mthd_bind_patt },
	{ 0x0194, nv04_graph_mthd_bind_rop },
	{ 0x0198, nv04_graph_mthd_bind_beta1 },
	{ 0x019c, nv04_graph_mthd_bind_beta4 },
	{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
	{ 0x03e4, nv04_graph_mthd_set_operation },
	{}
};
847
/* Software methods for the nv01-style image-from-cpu object (0x21). */
static struct nouveau_omthds
nv01_graph_ifc_omthds[] = {
	{ 0x0184, nv01_graph_mthd_bind_chroma },
	{ 0x0188, nv01_graph_mthd_bind_clip },
	{ 0x018c, nv01_graph_mthd_bind_patt },
	{ 0x0190, nv04_graph_mthd_bind_rop },
	{ 0x0194, nv04_graph_mthd_bind_beta1 },
	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{}
};
859
/* Software methods for the nv04-style image-from-cpu object (0x61). */
static struct nouveau_omthds
nv04_graph_ifc_omthds[] = {
	{ 0x0184, nv01_graph_mthd_bind_chroma },
	{ 0x0188, nv01_graph_mthd_bind_clip },
	{ 0x018c, nv04_graph_mthd_bind_patt },
	{ 0x0190, nv04_graph_mthd_bind_rop },
	{ 0x0194, nv04_graph_mthd_bind_beta1 },
	{ 0x0198, nv04_graph_mthd_bind_beta4 },
	{ 0x019c, nv04_graph_mthd_bind_surf2d },
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{}
};
872
/* Software methods for the nv03-style stretched ifc object (0x36). */
static struct nouveau_omthds
nv03_graph_sifc_omthds[] = {
	{ 0x0184, nv01_graph_mthd_bind_chroma },
	{ 0x0188, nv01_graph_mthd_bind_patt },
	{ 0x018c, nv04_graph_mthd_bind_rop },
	{ 0x0190, nv04_graph_mthd_bind_beta1 },
	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{}
};
883
/* Software methods for the nv04-style stretched ifc object (0x76). */
static struct nouveau_omthds
nv04_graph_sifc_omthds[] = {
	{ 0x0184, nv01_graph_mthd_bind_chroma },
	{ 0x0188, nv04_graph_mthd_bind_patt },
	{ 0x018c, nv04_graph_mthd_bind_rop },
	{ 0x0190, nv04_graph_mthd_bind_beta1 },
	{ 0x0194, nv04_graph_mthd_bind_beta4 },
	{ 0x0198, nv04_graph_mthd_bind_surf2d },
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{}
};
895
/* Software methods for the nv03-style scaled image-from-memory (0x37). */
static struct nouveau_omthds
nv03_graph_sifm_omthds[] = {
	{ 0x0188, nv01_graph_mthd_bind_patt },
	{ 0x018c, nv04_graph_mthd_bind_rop },
	{ 0x0190, nv04_graph_mthd_bind_beta1 },
	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
	{ 0x0304, nv04_graph_mthd_set_operation },
	{}
};
905
/* Software methods for the nv04-style scaled image-from-memory (0x77). */
static struct nouveau_omthds
nv04_graph_sifm_omthds[] = {
	{ 0x0188, nv04_graph_mthd_bind_patt },
	{ 0x018c, nv04_graph_mthd_bind_rop },
	{ 0x0190, nv04_graph_mthd_bind_beta1 },
	{ 0x0194, nv04_graph_mthd_bind_beta4 },
	{ 0x0198, nv04_graph_mthd_bind_surf2d },
	{ 0x0304, nv04_graph_mthd_set_operation },
	{}
};
916
/* Software methods for the surf3d object (0x53): emulated clip setup. */
static struct nouveau_omthds
nv04_graph_surf3d_omthds[] = {
	{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
	{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
	{}
};
923
/* Software methods for the nv03-style textured triangle object (0x48). */
static struct nouveau_omthds
nv03_graph_ttri_omthds[] = {
	{ 0x0188, nv01_graph_mthd_bind_clip },
	{ 0x018c, nv04_graph_mthd_bind_surf_color },
	{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
	{}
};
931
/* Software methods for nv01-style primitives (line/tri/rect, 0x1c-0x1e). */
static struct nouveau_omthds
nv01_graph_prim_omthds[] = {
	{ 0x0184, nv01_graph_mthd_bind_clip },
	{ 0x0188, nv01_graph_mthd_bind_patt },
	{ 0x018c, nv04_graph_mthd_bind_rop },
	{ 0x0190, nv04_graph_mthd_bind_beta1 },
	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{}
};
942
/* Software methods for nv04-style primitives (line/tri/rect, 0x5c-0x5e). */
static struct nouveau_omthds
nv04_graph_prim_omthds[] = {
	{ 0x0184, nv01_graph_mthd_bind_clip },
	{ 0x0188, nv04_graph_mthd_bind_patt },
	{ 0x018c, nv04_graph_mthd_bind_rop },
	{ 0x0190, nv04_graph_mthd_bind_beta1 },
	{ 0x0194, nv04_graph_mthd_bind_beta4 },
	{ 0x0198, nv04_graph_mthd_bind_surf2d },
	{ 0x02fc, nv04_graph_mthd_set_operation },
	{}
};
954
/*
 * Construct a 16-byte graphics object; word 0 holds the class id (plus
 * the big-endian flag where applicable), the remaining words start
 * zeroed.  Word 3 (0x0c) is later abused for the software "valid" bits
 * (see the large comment above).
 */
static int
nv04_graph_object_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nouveau_gpuobj *obj;
	int ret;

	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
				    16, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, nv_mclass(obj));
#ifdef __BIG_ENDIAN
	nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
#endif
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	return 0;
}
979
/* Object functions shared by all nv04 graphics object classes. */
struct nouveau_ofuncs
nv04_graph_ofuncs = {
	.ctor = nv04_graph_object_ctor,
	.dtor = _nouveau_gpuobj_dtor,
	.init = _nouveau_gpuobj_init,
	.fini = _nouveau_gpuobj_fini,
	.rd32 = _nouveau_gpuobj_rd32,
	.wr32 = _nouveau_gpuobj_wr32,
};
989
/* Graphics object classes supported on NV04/NV05, with the software
 * method tables needed to emulate missing hardware binding methods. */
static struct nouveau_oclass
nv04_graph_sclass[] = {
	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
	{ 0x0017, &nv04_graph_ofuncs }, /* chroma */
	{ 0x0018, &nv04_graph_ofuncs }, /* pattern (nv01) */
	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
	{ 0x001c, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* line */
	{ 0x001d, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* tri */
	{ 0x001e, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* rect */
	{ 0x001f, &nv04_graph_ofuncs, nv01_graph_blit_omthds },
	{ 0x0021, &nv04_graph_ofuncs, nv01_graph_ifc_omthds },
	{ 0x0030, &nv04_graph_ofuncs }, /* null */
	{ 0x0036, &nv04_graph_ofuncs, nv03_graph_sifc_omthds },
	{ 0x0037, &nv04_graph_ofuncs, nv03_graph_sifm_omthds },
	{ 0x0038, &nv04_graph_ofuncs }, /* dvd subpicture */
	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
	{ 0x0042, &nv04_graph_ofuncs }, /* surf2d */
	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
	{ 0x0048, &nv04_graph_ofuncs, nv03_graph_ttri_omthds },
	{ 0x004a, &nv04_graph_ofuncs, nv04_graph_gdi_omthds },
	{ 0x004b, &nv04_graph_ofuncs, nv03_graph_gdi_omthds },
	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
	{ 0x0053, &nv04_graph_ofuncs, nv04_graph_surf3d_omthds },
	{ 0x0054, &nv04_graph_ofuncs }, /* ttri */
	{ 0x0055, &nv04_graph_ofuncs }, /* mtri */
	{ 0x0057, &nv04_graph_ofuncs }, /* chroma */
	{ 0x0058, &nv04_graph_ofuncs }, /* surf_dst */
	{ 0x0059, &nv04_graph_ofuncs }, /* surf_src */
	{ 0x005a, &nv04_graph_ofuncs }, /* surf_color */
	{ 0x005b, &nv04_graph_ofuncs }, /* surf_zeta */
	{ 0x005c, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* line */
	{ 0x005d, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* tri */
	{ 0x005e, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* rect */
	{ 0x005f, &nv04_graph_ofuncs, nv04_graph_blit_omthds },
	{ 0x0060, &nv04_graph_ofuncs, nv04_graph_iifc_omthds },
	{ 0x0061, &nv04_graph_ofuncs, nv04_graph_ifc_omthds },
	{ 0x0064, &nv04_graph_ofuncs }, /* iifc (nv05) */
	{ 0x0065, &nv04_graph_ofuncs }, /* ifc (nv05) */
	{ 0x0066, &nv04_graph_ofuncs }, /* sifc (nv05) */
	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
	{ 0x0076, &nv04_graph_ofuncs, nv04_graph_sifc_omthds },
	{ 0x0077, &nv04_graph_ofuncs, nv04_graph_sifm_omthds },
	{},
};
1035
1036/*******************************************************************************
1037 * PGRAPH context
1038 ******************************************************************************/
1039
/*
 * Return the channel whose context is currently resident in PGRAPH, or
 * NULL if no context is valid (CTX_CONTROL bit 16 clear) or the channel
 * id read back from CTX_USER has no software context.
 */
static struct nv04_graph_chan *
nv04_graph_channel(struct nv04_graph_priv *priv)
{
	struct nv04_graph_chan *chan = NULL;
	if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
		int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
		if (chid < ARRAY_SIZE(priv->chan))
			chan = priv->chan[chid];
	}
	return chan;
}
1051
/*
 * Restore a channel's saved register values into PGRAPH, mark the
 * hardware context valid and set the active channel id.
 */
static int
nv04_graph_load_context(struct nv04_graph_chan *chan, int chid)
{
	struct nv04_graph_priv *priv = nv04_graph_priv(chan);
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
		nv_wr32(priv, nv04_graph_ctx_regs[i], chan->nv04[i]);

	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
	nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
	return 0;
}
1066
/*
 * Save the current PGRAPH register values into the channel's software
 * context and mark the hardware context invalid (channel id 0x0f).
 */
static int
nv04_graph_unload_context(struct nv04_graph_chan *chan)
{
	struct nv04_graph_priv *priv = nv04_graph_priv(chan);
	int i;

	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
		chan->nv04[i] = nv_rd32(priv, nv04_graph_ctx_regs[i]);

	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
	return 0;
}
1080
/*
 * Perform a software context switch: wait for PGRAPH to idle, save the
 * currently resident channel's context (if any), then load the context
 * of the channel that triggered the trap (read from TRAPPED_ADDR).
 */
static void
nv04_graph_context_switch(struct nv04_graph_priv *priv)
{
	struct nv04_graph_chan *prev = NULL;
	struct nv04_graph_chan *next = NULL;
	unsigned long flags;
	int chid;

	spin_lock_irqsave(&priv->lock, flags);
	nv04_graph_idle(priv);

	/* If previous context is valid, we need to save it */
	prev = nv04_graph_channel(priv);
	if (prev)
		nv04_graph_unload_context(prev);

	/* load context for next channel */
	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
	next = priv->chan[chid];
	if (next)
		nv04_graph_load_context(next, chid);

	spin_unlock_irqrestore(&priv->lock, flags);
}
1105
1106static u32 *ctx_reg(struct nv04_graph_chan *chan, u32 reg)
1107{
1108 int i;
1109
1110 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
1111 if (nv04_graph_ctx_regs[i] == reg)
1112 return &chan->nv04[i];
1113 }
1114
1115 return NULL;
1116}
1117
/*
 * Create (or share) the software PGRAPH context for a fifo channel.
 * If a context already exists for the channel id, a reference to it is
 * returned instead (return value 1).  The DEBUG_3 slot is seeded with
 * its per-channel default.
 */
static int
nv04_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nouveau_fifo_chan *fifo = (void *)parent;
	struct nv04_graph_priv *priv = (void *)engine;
	struct nv04_graph_chan *chan;
	unsigned long flags;
	int ret;

	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->chan[fifo->chid]) {
		/* context already exists: share it and drop ours */
		*pobject = nv_object(priv->chan[fifo->chid]);
		atomic_inc(&(*pobject)->refcount);
		spin_unlock_irqrestore(&priv->lock, flags);
		nouveau_object_destroy(&chan->base);
		return 1;
	}

	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;

	priv->chan[fifo->chid] = chan;
	chan->chid = fifo->chid;
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}
1151
/* Tear down a channel's software context and unhook it from priv. */
static void
nv04_graph_context_dtor(struct nouveau_object *object)
{
	struct nv04_graph_priv *priv = (void *)object->engine;
	struct nv04_graph_chan *chan = (void *)object;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	priv->chan[chan->chid] = NULL;
	spin_unlock_irqrestore(&priv->lock, flags);

	nouveau_object_destroy(&chan->base);
}
1165
/*
 * Fini a channel context: with PGRAPH fifo access temporarily disabled,
 * save the context out of hardware if it is the one currently resident.
 */
static int
nv04_graph_context_fini(struct nouveau_object *object, bool suspend)
{
	struct nv04_graph_priv *priv = (void *)object->engine;
	struct nv04_graph_chan *chan = (void *)object;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	if (nv04_graph_channel(priv) == chan)
		nv04_graph_unload_context(chan);
	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&priv->lock, flags);

	return nouveau_object_fini(&chan->base, suspend);
}
1182
/* Context class: per-channel PGRAPH context objects. */
static struct nouveau_oclass
nv04_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_graph_context_ctor,
		.dtor = nv04_graph_context_dtor,
		.init = nouveau_object_init,
		.fini = nv04_graph_context_fini,
	},
};
1193
1194/*******************************************************************************
1195 * PGRAPH engine/subdev functions
1196 ******************************************************************************/
1197
/*
 * Wait for PGRAPH to report idle; on NV40-family cards the SYNC_STALL
 * status bit is excluded from the wait.  Logs and returns false on
 * timeout.
 */
bool
nv04_graph_idle(void *obj)
{
	struct nouveau_graph *graph = nouveau_graph(obj);
	u32 mask = 0xffffffff;

	if (nv_device(obj)->card_type == NV_40)
		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;

	if (!nv_wait(graph, NV04_PGRAPH_STATUS, mask, 0)) {
		nv_error(graph, "idle timed out with status 0x%08x\n",
			 nv_rd32(graph, NV04_PGRAPH_STATUS));
		return false;
	}

	return true;
}
1215
/* Decode table for NV03_PGRAPH_INTR bits (log output). */
static const struct nouveau_bitfield
nv04_graph_intr_name[] = {
	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
	{}
};
1221
/* Decode table for NV03_PGRAPH_NSTATUS bits (log output). */
static const struct nouveau_bitfield
nv04_graph_nstatus[] = {
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
	{}
};
1230
/* Decode table for NV03_PGRAPH_NSOURCE bits (shared with later chips). */
const struct nouveau_bitfield
nv04_graph_nsource[] = {
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
	{}
};
1254
/*
 * PGRAPH interrupt handler: for ILLEGAL_MTHD notifies, try to dispatch
 * a registered software method on the trapped object; service context-
 * switch requests; ack everything and log anything left unhandled.
 */
static void
nv04_graph_intr(struct nouveau_subdev *subdev)
{
	struct nv04_graph_priv *priv = (void *)subdev;
	struct nv04_graph_chan *chan = NULL;
	struct nouveau_namedb *namedb = NULL;
	struct nouveau_handle *handle = NULL;
	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
	/* decode trapped method: channel id, subchannel, method offset */
	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
	u32 chid = (addr & 0x0f000000) >> 24;
	u32 subc = (addr & 0x0000e000) >> 13;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff;
	u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4;
	u32 show = stat;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	chan = priv->chan[chid];
	if (chan)
		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (stat & NV_PGRAPH_INTR_NOTIFY) {
		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
			/* a software method may handle the trap; if it
			 * does, don't report this interrupt bit */
			handle = nouveau_namedb_get_vinst(namedb, inst);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~NV_PGRAPH_INTR_NOTIFY;
		}
	}

	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		nv04_graph_context_switch(priv);
	}

	/* ack remaining interrupts and re-enable PGRAPH fifo access */
	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nv_error(priv, "");
		nouveau_bitfield_print(nv04_graph_intr_name, show);
		printk(" nsource:");
		nouveau_bitfield_print(nv04_graph_nsource, nsource);
		printk(" nstatus:");
		nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
		printk("\n");
		nv_error(priv, "ch %d/%d class 0x%04x "
			       "mthd 0x%04x data 0x%08x\n",
			 chid, subc, class, mthd, data);
	}

	nouveau_namedb_put(handle);
}
1314
/*
 * Construct the NV04 PGRAPH engine: hook up the interrupt handler,
 * the PMC enable bit (unit), and the context/object classes.
 */
static int
nv04_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv04_graph_priv *priv;
	int ret;

	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00001000;	/* PMC enable bit for PGRAPH */
	nv_subdev(priv)->intr = nv04_graph_intr;
	nv_engine(priv)->cclass = &nv04_graph_cclass;
	nv_engine(priv)->sclass = nv04_graph_sclass;
	spin_lock_init(&priv->lock);
	return 0;
}
1335
/*
 * Initialise PGRAPH: enable interrupts and program the DEBUG/state
 * registers with values matching the proprietary driver ("blob") and
 * the haiku driver, as noted inline.
 */
static int
nv04_graph_init(struct nouveau_object *object)
{
	struct nouveau_engine *engine = nv_engine(object);
	struct nv04_graph_priv *priv = (void *)engine;
	int ret;

	ret = nouveau_graph_init(&priv->base);
	if (ret)
		return ret;

	/* Enable PGRAPH interrupts */
	nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF);
	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(priv, NV04_PGRAPH_VALID1, 0);
	nv_wr32(priv, NV04_PGRAPH_VALID2, 0);
	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000);
	/*1231C000 blob, 001 haiku*/
	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100);
	/*0x72111100 blob , 01 haiku*/
	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
	/*haiku same*/

	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
	/*haiku and blob 10d4*/

	nv_wr32(priv, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);

	/* These don't belong here, they're part of a per-channel context */
	nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
	nv_wr32(priv, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
	return 0;
}
1377
/* Engine class registration for NV04 PGRAPH. */
struct nouveau_oclass
nv04_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_graph_ctor,
		.dtor = _nouveau_graph_dtor,
		.init = nv04_graph_init,
		.fini = _nouveau_graph_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
new file mode 100644
index 000000000000..92521c89e77f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -0,0 +1,1314 @@
1/*
2 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/handle.h>
28
29#include <subdev/fb.h>
30
31#include <engine/fifo.h>
32#include <engine/graph.h>
33
34#include "regs.h"
35
/* Software snapshot of the PGRAPH transform-pipe state.  Each member
 * mirrors one window of the PIPE address space (field name = base
 * address, array length = window size in bytes / 4); the windows are
 * read and written through NV10_PGRAPH_PIPE_ADDRESS /
 * NV10_PGRAPH_PIPE_DATA (see PIPE_SAVE/PIPE_RESTORE below). */
struct pipe_state {
	u32 pipe_0x0000[0x040/4];
	u32 pipe_0x0040[0x010/4];
	u32 pipe_0x0200[0x0c0/4];
	u32 pipe_0x4400[0x080/4];
	u32 pipe_0x6400[0x3b0/4];
	u32 pipe_0x6800[0x2f0/4];
	u32 pipe_0x6c00[0x030/4];
	u32 pipe_0x7000[0x130/4];
	u32 pipe_0x7400[0x0c0/4];
	u32 pipe_0x7800[0x0c0/4];
};
48
/* MMIO registers making up an NV10 PGRAPH channel context.  The order
 * of this table defines the layout of the software context image
 * (struct nv10_graph_chan::nv10), so entries must not be reordered.
 * Bare hex addresses continue the multi-register range opened by the
 * preceding named register (see the inline comments). */
static int nv10_graph_ctx_regs[] = {
	NV10_PGRAPH_CTX_SWITCH(0),
	NV10_PGRAPH_CTX_SWITCH(1),
	NV10_PGRAPH_CTX_SWITCH(2),
	NV10_PGRAPH_CTX_SWITCH(3),
	NV10_PGRAPH_CTX_SWITCH(4),
	NV10_PGRAPH_CTX_CACHE(0, 0),
	NV10_PGRAPH_CTX_CACHE(0, 1),
	NV10_PGRAPH_CTX_CACHE(0, 2),
	NV10_PGRAPH_CTX_CACHE(0, 3),
	NV10_PGRAPH_CTX_CACHE(0, 4),
	NV10_PGRAPH_CTX_CACHE(1, 0),
	NV10_PGRAPH_CTX_CACHE(1, 1),
	NV10_PGRAPH_CTX_CACHE(1, 2),
	NV10_PGRAPH_CTX_CACHE(1, 3),
	NV10_PGRAPH_CTX_CACHE(1, 4),
	NV10_PGRAPH_CTX_CACHE(2, 0),
	NV10_PGRAPH_CTX_CACHE(2, 1),
	NV10_PGRAPH_CTX_CACHE(2, 2),
	NV10_PGRAPH_CTX_CACHE(2, 3),
	NV10_PGRAPH_CTX_CACHE(2, 4),
	NV10_PGRAPH_CTX_CACHE(3, 0),
	NV10_PGRAPH_CTX_CACHE(3, 1),
	NV10_PGRAPH_CTX_CACHE(3, 2),
	NV10_PGRAPH_CTX_CACHE(3, 3),
	NV10_PGRAPH_CTX_CACHE(3, 4),
	NV10_PGRAPH_CTX_CACHE(4, 0),
	NV10_PGRAPH_CTX_CACHE(4, 1),
	NV10_PGRAPH_CTX_CACHE(4, 2),
	NV10_PGRAPH_CTX_CACHE(4, 3),
	NV10_PGRAPH_CTX_CACHE(4, 4),
	NV10_PGRAPH_CTX_CACHE(5, 0),
	NV10_PGRAPH_CTX_CACHE(5, 1),
	NV10_PGRAPH_CTX_CACHE(5, 2),
	NV10_PGRAPH_CTX_CACHE(5, 3),
	NV10_PGRAPH_CTX_CACHE(5, 4),
	NV10_PGRAPH_CTX_CACHE(6, 0),
	NV10_PGRAPH_CTX_CACHE(6, 1),
	NV10_PGRAPH_CTX_CACHE(6, 2),
	NV10_PGRAPH_CTX_CACHE(6, 3),
	NV10_PGRAPH_CTX_CACHE(6, 4),
	NV10_PGRAPH_CTX_CACHE(7, 0),
	NV10_PGRAPH_CTX_CACHE(7, 1),
	NV10_PGRAPH_CTX_CACHE(7, 2),
	NV10_PGRAPH_CTX_CACHE(7, 3),
	NV10_PGRAPH_CTX_CACHE(7, 4),
	NV10_PGRAPH_CTX_USER,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV10_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV10_PGRAPH_SURFACE,
	NV10_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV10_PGRAPH_NOTIFY,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
	0x00400904,
	0x00400908,
	0x0040090c,
	0x00400910,
	0x00400914,
	0x00400918,
	0x0040091c,
	0x00400920,
	0x00400924,
	0x00400928,
	0x0040092c,
	0x00400930,
	0x00400934,
	0x00400938,
	0x0040093c,
	0x00400940,
	0x00400944,
	0x00400948,
	0x0040094c,
	0x00400950,
	0x00400954,
	0x00400958,
	0x0040095c,
	0x00400960,
	0x00400964,
	0x00400968,
	0x0040096c,
	0x00400970,
	0x00400974,
	0x00400978,
	0x0040097c,
	0x00400980,
	0x00400984,
	0x00400988,
	0x0040098c,
	0x00400990,
	0x00400994,
	0x00400998,
	0x0040099c,
	0x004009a0,
	0x004009a4,
	0x004009a8,
	0x004009ac,
	0x004009b0,
	0x004009b4,
	0x004009b8,
	0x004009bc,
	0x004009c0,
	0x004009c4,
	0x004009c8,
	0x004009cc,
	0x004009d0,
	0x004009d4,
	0x004009d8,
	0x004009dc,
	0x004009e0,
	0x004009e4,
	0x004009e8,
	0x004009ec,
	0x004009f0,
	0x004009f4,
	0x004009f8,
	0x004009fc,
	NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	NV03_PGRAPH_MONO_COLOR0,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	0x00400e70,
	0x00400e74,
	0x00400e78,
	0x00400e7c,
	0x00400e80,
	0x00400e84,
	0x00400e88,
	0x00400e8c,
	0x00400ea0,
	0x00400ea4,
	0x00400ea8,
	0x00400e90,
	0x00400e94,
	0x00400e98,
	0x00400e9c,
	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
	NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20-0x400f3c */
	0x00400f04,
	0x00400f24,
	0x00400f08,
	0x00400f28,
	0x00400f0c,
	0x00400f2c,
	0x00400f10,
	0x00400f30,
	0x00400f14,
	0x00400f34,
	0x00400f18,
	0x00400f38,
	0x00400f1c,
	0x00400f3c,
	NV10_PGRAPH_XFMODE0,
	NV10_PGRAPH_XFMODE1,
	NV10_PGRAPH_GLOBALSTATE0,
	NV10_PGRAPH_GLOBALSTATE1,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	NV03_PGRAPH_ABS_UCLIP_XMIN,
	NV03_PGRAPH_ABS_UCLIP_XMAX,
	NV03_PGRAPH_ABS_UCLIP_YMIN,
	NV03_PGRAPH_ABS_UCLIP_YMAX,
	0x00400550,
	0x00400558,
	0x00400554,
	0x0040055c,
	NV03_PGRAPH_ABS_UCLIPA_XMIN,
	NV03_PGRAPH_ABS_UCLIPA_XMAX,
	NV03_PGRAPH_ABS_UCLIPA_YMIN,
	NV03_PGRAPH_ABS_UCLIPA_YMAX,
	NV03_PGRAPH_ABS_ICLIP_XMAX,
	NV03_PGRAPH_ABS_ICLIP_YMAX,
	NV03_PGRAPH_XY_LOGIC_MISC0,
	NV03_PGRAPH_XY_LOGIC_MISC1,
	NV03_PGRAPH_XY_LOGIC_MISC2,
	NV03_PGRAPH_XY_LOGIC_MISC3,
	NV03_PGRAPH_CLIPX_0,
	NV03_PGRAPH_CLIPX_1,
	NV03_PGRAPH_CLIPY_0,
	NV03_PGRAPH_CLIPY_1,
	NV10_PGRAPH_COMBINER0_IN_ALPHA,
	NV10_PGRAPH_COMBINER1_IN_ALPHA,
	NV10_PGRAPH_COMBINER0_IN_RGB,
	NV10_PGRAPH_COMBINER1_IN_RGB,
	NV10_PGRAPH_COMBINER_COLOR0,
	NV10_PGRAPH_COMBINER_COLOR1,
	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
	NV10_PGRAPH_COMBINER0_OUT_RGB,
	NV10_PGRAPH_COMBINER1_OUT_RGB,
	NV10_PGRAPH_COMBINER_FINAL0,
	NV10_PGRAPH_COMBINER_FINAL1,
	0x00400e00,
	0x00400e04,
	0x00400e08,
	0x00400e0c,
	0x00400e10,
	0x00400e14,
	0x00400e18,
	0x00400e1c,
	0x00400e20,
	0x00400e24,
	0x00400e28,
	0x00400e2c,
	0x00400e30,
	0x00400e34,
	0x00400e38,
	0x00400e3c,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV10_PGRAPH_DIMX_TEXTURE,
	NV10_PGRAPH_WDIMX_TEXTURE,
	NV10_PGRAPH_DVD_COLORFMT,
	NV10_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	NV03_PGRAPH_X_MISC,
	NV03_PGRAPH_Y_MISC,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2,
};
370
/* Additional context registers present on NV17 and later chipsets,
 * saved/restored on top of nv10_graph_ctx_regs[] (layout of
 * struct nv10_graph_chan::nv17 — do not reorder). */
static int nv17_graph_ctx_regs[] = {
	NV10_PGRAPH_DEBUG_4,
	0x004006b0,
	0x00400eac,
	0x00400eb0,
	0x00400eb4,
	0x00400eb8,
	0x00400ebc,
	0x00400ec0,
	0x00400ec4,
	0x00400ec8,
	0x00400ecc,
	0x00400ed0,
	0x00400ed4,
	0x00400ed8,
	0x00400edc,
	0x00400ee0,
	0x00400a00,
	0x00400a04,
};
391
/* Private NV10 PGRAPH engine state: per-channel software contexts
 * indexed by fifo channel id; "lock" serializes context switches and
 * chan[] updates (taken with IRQs disabled). */
struct nv10_graph_priv {
	struct nouveau_graph base;
	struct nv10_graph_chan *chan[32];	/* one slot per fifo channel */
	spinlock_t lock;
};
397
/* Software copy of one channel's PGRAPH context. */
struct nv10_graph_chan {
	struct nouveau_object base;
	int chid;				/* owning fifo channel id */
	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];	/* base ctx image */
	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];	/* nv17+ extras */
	struct pipe_state pipe_state;		/* transform pipe snapshot */
	u32 lma_window[4];	/* args of celsius LMA window methods 0x1638-0x1644 */
};
406
407
408static inline struct nv10_graph_priv *
409nv10_graph_priv(struct nv10_graph_chan *chan)
410{
411 return (void *)nv_object(chan)->engine;
412}
413
414/*******************************************************************************
415 * Graphics object classes
416 ******************************************************************************/
417
/* Read ARRAY_SIZE(state) words of transform-pipe state starting at
 * PIPE address "addr" into "state".  The PIPE_DATA port auto-increments
 * the address after each access, so only the base address is written.
 * "state" must be a real array (ARRAY_SIZE is taken of it). */
#define PIPE_SAVE(priv, state, addr) \
	do { \
		int __i; \
		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \
		for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
			state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \
	} while (0)

/* Inverse of PIPE_SAVE: write "state" back to the pipe at "addr". */
#define PIPE_RESTORE(priv, state, addr) \
	do { \
		int __i; \
		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \
		for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
			nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
	} while (0)
433
/* Graph object classes exposed on NV10 (0x0056 = celsius 3D class). */
static struct nouveau_oclass
nv10_graph_sclass[] = {
	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
	{ 0x0030, &nv04_graph_ofuncs }, /* null */
	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
	{ 0x0056, &nv04_graph_ofuncs }, /* celsius */
	{},
};
456
/* Same as nv10_graph_sclass but with the NV15 celsius class (0x0096). */
static struct nouveau_oclass
nv15_graph_sclass[] = {
	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
	{ 0x0030, &nv04_graph_ofuncs }, /* null */
	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
	{ 0x0096, &nv04_graph_ofuncs }, /* celsius */
	{},
};
479
/* Software method for celsius LMA window setup (methods 0x1638-0x1644).
 * The four method arguments are accumulated in chan->lma_window; only
 * the final one (0x1644) triggers the actual pipe update, which must be
 * done with PGRAPH idle.  The write sequence below mirrors what the
 * binary driver does and its exact ordering matters — do not reorder. */
static int
nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
			   void *args, u32 size)
{
	struct nv10_graph_chan *chan = (void *)object->parent;
	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
	struct pipe_state *pipe = &chan->pipe_state;
	u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
	u32 xfmode0, xfmode1;
	u32 data = *(u32 *)args;
	int i;

	/* stash argument; mthd is one of 0x1638/163c/1640/1644 */
	chan->lma_window[(mthd - 0x1638) / 4] = data;

	if (mthd != 0x1644)
		return 0;

	nv04_graph_idle(priv);

	/* save the pipe ranges we are about to clobber */
	PIPE_SAVE(priv, pipe_0x0040, 0x0040);
	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);

	PIPE_RESTORE(priv, chan->lma_window, 0x6790);

	nv04_graph_idle(priv);

	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);

	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(priv, pipe_0x64c0, 0x64c0);
	PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
	PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);

	nv04_graph_idle(priv);

	/* load neutral transform state before poking the pipe */
	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);

	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);

	nv04_graph_idle(priv);

	PIPE_RESTORE(priv, pipe_0x0040, 0x0040);

	/* restore XFMODE and the saved pipe ranges */
	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);

	PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0);
	PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0);
	PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);

	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv04_graph_idle(priv);

	return 0;
}
556
/* Software method 0x1658: enable celsius LMA by flipping the relevant
 * bits in DEBUG_4 and 0x4006b0 once PGRAPH is idle. */
static int
nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
			   void *args, u32 size)
{
	struct nv10_graph_chan *chan = (void *)object->parent;
	struct nv10_graph_priv *priv = nv10_graph_priv(chan);

	nv04_graph_idle(priv);

	nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
	nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
	return 0;
}
570
/* Software-method table for the NV17 celsius class (0x0099): LMA window
 * setup (0x1638-0x1644) and LMA enable (0x1658). */
static struct nouveau_omthds
nv17_celcius_omthds[] = {
	{ 0x1638, nv17_graph_mthd_lma_window },
	{ 0x163c, nv17_graph_mthd_lma_window },
	{ 0x1640, nv17_graph_mthd_lma_window },
	{ 0x1644, nv17_graph_mthd_lma_window },
	{ 0x1658, nv17_graph_mthd_lma_enable },
	{}
};
580
/* NV17 object classes: celsius is 0x0099 and carries software methods
 * for LMA handling (see nv17_celcius_omthds above). */
static struct nouveau_oclass
nv17_graph_sclass[] = {
	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
	{ 0x0030, &nv04_graph_ofuncs }, /* null */
	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
	{ 0x0099, &nv04_graph_ofuncs, nv17_celcius_omthds }, /* celsius */
	{},
};
603
604/*******************************************************************************
605 * PGRAPH context
606 ******************************************************************************/
607
/* Return the channel whose context is currently loaded in PGRAPH, or
 * NULL if no valid context is loaded.  Reads the hardware directly:
 * 0x400144 bit 16 = "context valid", 0x400148[31:24] = channel id.
 * Caller must hold priv->lock. */
static struct nv10_graph_chan *
nv10_graph_channel(struct nv10_graph_priv *priv)
{
	struct nv10_graph_chan *chan = NULL;
	if (nv_rd32(priv, 0x400144) & 0x00010000) {
		int chid = nv_rd32(priv, 0x400148) >> 24;
		if (chid < ARRAY_SIZE(priv->chan))
			chan = priv->chan[chid];
	}
	return chan;
}
619
/* Snapshot all transform-pipe windows into the channel's software
 * context (inverse of nv10_graph_load_pipe). */
static void
nv10_graph_save_pipe(struct nv10_graph_chan *chan)
{
	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
	struct pipe_state *pipe = &chan->pipe_state;

	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
	PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400);
	PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800);
	PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00);
	PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000);
	PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400);
	PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800);
	PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040);
	PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000);
}
637
/* Restore the channel's transform-pipe snapshot into hardware.  XFMODE
 * is forced to a neutral value while the pipe is reprogrammed and
 * restored afterwards; the restore order of the windows matters. */
static void
nv10_graph_load_pipe(struct nv10_graph_chan *chan)
{
	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
	struct pipe_state *pipe = &chan->pipe_state;
	u32 xfmode0, xfmode1;
	int i;

	nv04_graph_idle(priv);
	/* XXX check haiku comments */
	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);


	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
	nv04_graph_idle(priv);

	/* restore XFMODE */
	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
	PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400);
	PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800);
	PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00);
	PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000);
	PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400);
	PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800);
	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
	PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
	PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
	nv04_graph_idle(priv);
}
687
/* Fill the channel's software pipe snapshot with the initial (default)
 * transform-pipe values.  Writes go only to the in-memory pipe_state,
 * not to hardware; PIPE_INIT_END checks that each window was filled
 * completely (exactly ARRAY_SIZE entries written). */
static void
nv10_graph_create_pipe(struct nv10_graph_chan *chan)
{
	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
	struct pipe_state *pipe_state = &chan->pipe_state;
	u32 *pipe_state_addr;
	int i;
#define PIPE_INIT(addr) \
	do { \
		pipe_state_addr = pipe_state->pipe_##addr; \
	} while (0)
#define PIPE_INIT_END(addr) \
	do { \
		u32 *__end_addr = pipe_state->pipe_##addr + \
				ARRAY_SIZE(pipe_state->pipe_##addr); \
		if (pipe_state_addr != __end_addr) \
			nv_error(priv, "incomplete pipe init for 0x%x :  %p/%p\n", \
				addr, pipe_state_addr, __end_addr); \
	} while (0)
#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value

	PIPE_INIT(0x0200);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0200);

	PIPE_INIT(0x6400);
	for (i = 0; i < 211; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	PIPE_INIT_END(0x6400);

	PIPE_INIT(0x6800);
	for (i = 0; i < 162; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	for (i = 0; i < 25; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6800);

	PIPE_INIT(0x6c00);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0xbf800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6c00);

	PIPE_INIT(0x7000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	for (i = 0; i < 35; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7000);

	PIPE_INIT(0x7400);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7400);

	PIPE_INIT(0x7800);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7800);

	PIPE_INIT(0x4400);
	for (i = 0; i < 32; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x4400);

	PIPE_INIT(0x0000);
	for (i = 0; i < 16; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0000);

	PIPE_INIT(0x0040);
	for (i = 0; i < 4; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0040);

#undef PIPE_INIT
#undef PIPE_INIT_END
#undef NV_WRITE_PIPE_INIT
}
842
843static int
844nv10_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
845{
846 int i;
847 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
848 if (nv10_graph_ctx_regs[i] == reg)
849 return i;
850 }
851 nv_error(priv, "unknow offset nv10_ctx_regs %d\n", reg);
852 return -1;
853}
854
855static int
856nv17_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
857{
858 int i;
859 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
860 if (nv17_graph_ctx_regs[i] == reg)
861 return i;
862 }
863 nv_error(priv, "unknow offset nv17_ctx_regs %d\n", reg);
864 return -1;
865}
866
/* Restore the hidden DMA_VTXBUF state of a celsius object by replaying
 * method 0x18c through the PGRAPH FIFO (it cannot be restored via MMIO).
 * The current FIFO and ctx-switch state is saved around the injection
 * and restored afterwards. */
static void
nv10_graph_load_dma_vtxbuf(struct nv10_graph_chan *chan, int chid, u32 inst)
{
	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
	u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
	u32 ctx_user, ctx_switch[5];
	int i, subchan = -1;

	/* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
	 * that cannot be restored via MMIO. Do it through the FIFO
	 * instead.
	 */

	/* Look for a celsius object */
	for (i = 0; i < 8; i++) {
		int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;

		if (class == 0x56 || class == 0x96 || class == 0x99) {
			subchan = i;
			break;
		}
	}

	if (subchan < 0 || !inst)
		return;

	/* Save the current ctx object */
	ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER);
	for (i = 0; i < 5; i++)
		ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i));

	/* Save the FIFO state */
	st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2);
	st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL);
	st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH);
	fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR);

	for (i = 0; i < ARRAY_SIZE(fifo); i++)
		fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i);

	/* Switch to the celsius subchannel */
	for (i = 0; i < 5; i++)
		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i),
			nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i)));
	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);

	/* Inject NV10TCL_DMA_VTXBUF */
	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2,
		0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
	nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);

	/* Restore the FIFO state */
	for (i = 0; i < ARRAY_SIZE(fifo); i++)
		nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]);

	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2);
	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);

	/* Restore the current ctx object */
	for (i = 0; i < 5; i++)
		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
	nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user);
}
936
/* Load channel "chid"'s software context image into PGRAPH: MMIO
 * context registers, transform pipe, hidden vtxbuf state, and finally
 * mark the context valid for that channel.  Caller must hold
 * priv->lock with PGRAPH idle. */
static int
nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
{
	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
	u32 inst;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
		nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);

	if (nv_device(priv)->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
			nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
	}

	nv10_graph_load_pipe(chan);

	/* GLOBALSTATE1 low bits hold the celsius vtxbuf dma instance */
	inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
	nv10_graph_load_dma_vtxbuf(chan, chid, inst);

	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
	nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
	return 0;
}
962
/* Save the current PGRAPH state into the channel's software context
 * image and mark hardware as holding no valid context (channel id
 * 0x1f).  Caller must hold priv->lock with PGRAPH idle. */
static int
nv10_graph_unload_context(struct nv10_graph_chan *chan)
{
	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
	int i;

	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
		chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);

	if (nv_device(priv)->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
			chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
	}

	nv10_graph_save_pipe(chan);

	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
	return 0;
}
983
/* Perform a software context switch: save the currently-loaded channel
 * (if any) and load the context of the channel that trapped, whose id
 * is taken from NV04_PGRAPH_TRAPPED_ADDR. */
static void
nv10_graph_context_switch(struct nv10_graph_priv *priv)
{
	struct nv10_graph_chan *prev = NULL;
	struct nv10_graph_chan *next = NULL;
	unsigned long flags;
	int chid;

	spin_lock_irqsave(&priv->lock, flags);
	nv04_graph_idle(priv);

	/* If previous context is valid, we need to save it */
	prev = nv10_graph_channel(priv);
	if (prev)
		nv10_graph_unload_context(prev);

	/* load context for next channel */
	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	next = priv->chan[chid];
	if (next)
		nv10_graph_load_context(next, chid);

	spin_unlock_irqrestore(&priv->lock, flags);
}
1008
/* Store "val" at register "reg"'s slot in the channel's software
 * context image (chan->nv10[] / chan->nv17[]).
 *
 * The find_offset helpers return the register's array index, or -1 when
 * the register is not part of the context.  Index 0 is a valid slot
 * (e.g. NV10_PGRAPH_DEBUG_4 is nv17_graph_ctx_regs[0] and IS written by
 * nv10_graph_context_ctor), so the failure check must be "offset >= 0";
 * the previous "offset > 0" silently discarded writes to slot 0. */
#define NV_WRITE_CTX(reg, val) do { \
	int offset = nv10_graph_ctx_regs_find_offset(priv, reg); \
	if (offset >= 0) \
		chan->nv10[offset] = val; \
	} while (0)

#define NV17_WRITE_CTX(reg, val) do { \
	int offset = nv17_graph_ctx_regs_find_offset(priv, reg); \
	if (offset >= 0) \
		chan->nv17[offset] = val; \
	} while (0)
1020
/* Create (or share) the PGRAPH context object for a fifo channel.
 * If the channel already has a context, the existing object's refcount
 * is bumped and 1 is returned to signal "shared, not new"; otherwise a
 * fresh software context image is initialised with default register
 * values and the default transform-pipe state. */
static int
nv10_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nouveau_fifo_chan *fifo = (void *)parent;
	struct nv10_graph_priv *priv = (void *)engine;
	struct nv10_graph_chan *chan;
	unsigned long flags;
	int ret;

	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->chan[fifo->chid]) {
		/* channel already has a context: share it and drop ours */
		*pobject = nv_object(priv->chan[fifo->chid]);
		atomic_inc(&(*pobject)->refcount);
		spin_unlock_irqrestore(&priv->lock, flags);
		nouveau_object_destroy(&chan->base);
		return 1;
	}

	/* default software-context register values */
	NV_WRITE_CTX(0x00400e88, 0x08000000);
	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
	NV_WRITE_CTX(0x00400e10, 0x00001000);
	NV_WRITE_CTX(0x00400e14, 0x00001000);
	NV_WRITE_CTX(0x00400e30, 0x00080008);
	NV_WRITE_CTX(0x00400e34, 0x00080008);
	if (nv_device(priv)->chipset >= 0x17) {
		/* is it really needed ??? */
		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
					nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
		NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0));
		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
	}
	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->chid << 24);

	nv10_graph_create_pipe(chan);

	priv->chan[fifo->chid] = chan;
	chan->chid = fifo->chid;
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}
1073
/* Destroy a channel's PGRAPH context: unhook it from the engine's
 * per-channel table under the lock, then free the object. */
static void
nv10_graph_context_dtor(struct nouveau_object *object)
{
	struct nv10_graph_priv *priv = (void *)object->engine;
	struct nv10_graph_chan *chan = (void *)object;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	priv->chan[chan->chid] = NULL;
	spin_unlock_irqrestore(&priv->lock, flags);

	nouveau_object_destroy(&chan->base);
}
1087
/* Suspend/teardown hook for a PGRAPH channel context.  If this channel's
 * context is currently resident on the hardware it is unloaded first;
 * PGRAPH FIFO access is halted around the check so the hardware cannot
 * switch contexts underneath us. */
static int
nv10_graph_context_fini(struct nouveau_object *object, bool suspend)
{
	struct nv10_graph_priv *priv = (void *)object->engine;
	struct nv10_graph_chan *chan = (void *)object;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* bit 0 of NV04_PGRAPH_FIFO gates PGRAPH's command fetch */
	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	if (nv10_graph_channel(priv) == chan)
		nv10_graph_unload_context(chan);
	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&priv->lock, flags);

	return nouveau_object_fini(&chan->base, suspend);
}
1104
/* Context class for the NV10 PGRAPH engine: ties the per-channel context
 * object lifecycle to the ctor/dtor/fini handlers above.  init is the
 * generic object init since context load happens on demand. */
static struct nouveau_oclass
nv10_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x10),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv10_graph_context_ctor,
		.dtor = nv10_graph_context_dtor,
		.init = nouveau_object_init,
		.fini = nv10_graph_context_fini,
	},
};
1115
1116/*******************************************************************************
1117 * PGRAPH engine/subdev functions
1118 ******************************************************************************/
1119
/* Program PGRAPH's copy of tiling region @i.  PFIFO is paused and PGRAPH
 * idled first so the tile registers are not rewritten while commands are
 * in flight; the pause is released at the end. */
static void
nv10_graph_tile_prog(struct nouveau_engine *engine, int i)
{
	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
	struct nv10_graph_priv *priv = (void *)engine;
	unsigned long flags;

	pfifo->pause(pfifo, &flags);
	nv04_graph_idle(priv);

	nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
	nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
	nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr);

	pfifo->start(pfifo, &flags);
}
1137
/* Human-readable names for NV03_PGRAPH_INTR status bits, used when
 * reporting unhandled interrupts. */
const struct nouveau_bitfield nv10_graph_intr_name[] = {
	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
	{}
};
1143
/* Decode table for the PGRAPH NSTATUS register (error-condition detail
 * bits accompanying an ERROR interrupt). */
const struct nouveau_bitfield nv10_graph_nstatus[] = {
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,     "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,    "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,     "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
	{}
};
1151
/* NV10 PGRAPH interrupt handler.
 *
 * Reads the trap state registers, tries to dispatch ILLEGAL_MTHD errors to
 * a software-method handler looked up through the offending channel's
 * namedb, services pending context switches, then acknowledges everything
 * and logs whatever remained unhandled. */
static void
nv10_graph_intr(struct nouveau_subdev *subdev)
{
	struct nv10_graph_priv *priv = (void *)subdev;
	struct nv10_graph_chan *chan = NULL;
	struct nouveau_namedb *namedb = NULL;
	struct nouveau_handle *handle = NULL;
	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
	u32 chid = (addr & 0x01f00000) >> 20;	/* trapping channel id */
	u32 subc = (addr & 0x00070000) >> 16;	/* trapping subchannel */
	u32 mthd = (addr & 0x00001ffc);		/* trapping method offset */
	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
	u32 show = stat;	/* bits still unhandled at the end */
	unsigned long flags;

	/* look up the trapping channel; hold the lock only long enough to
	 * grab the namedb reference */
	spin_lock_irqsave(&priv->lock, flags);
	chan = priv->chan[chid];
	if (chan)
		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (stat & NV_PGRAPH_INTR_ERROR) {
		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
			/* try the software-method path before declaring the
			 * method illegal */
			handle = nouveau_namedb_get_class(namedb, class);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~NV_PGRAPH_INTR_ERROR;
		}
	}

	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		nv10_graph_context_switch(priv);
	}

	/* ack remaining interrupts and re-enable PGRAPH fetch */
	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nv_error(priv, "");
		nouveau_bitfield_print(nv10_graph_intr_name, show);
		printk(" nsource:");
		nouveau_bitfield_print(nv04_graph_nsource, nsource);
		printk(" nstatus:");
		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
		printk("\n");
		nv_error(priv, "ch %d/%d class 0x%04x "
			       "mthd 0x%04x data 0x%08x\n",
			 chid, subc, class, mthd, data);
	}

	/* drops the handle reference taken above (NULL-safe) */
	nouveau_namedb_put(handle);
}
1210
1211static int
1212nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1213 struct nouveau_oclass *oclass, void *data, u32 size,
1214 struct nouveau_object **pobject)
1215{
1216 struct nv10_graph_priv *priv;
1217 int ret;
1218
1219 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
1220 *pobject = nv_object(priv);
1221 if (ret)
1222 return ret;
1223
1224 nv_subdev(priv)->unit = 0x00001000;
1225 nv_subdev(priv)->intr = nv10_graph_intr;
1226 nv_engine(priv)->cclass = &nv10_graph_cclass;
1227
1228 if (nv_device(priv)->chipset <= 0x10)
1229 nv_engine(priv)->sclass = nv10_graph_sclass;
1230 else
1231 if (nv_device(priv)->chipset < 0x17 ||
1232 nv_device(priv)->chipset == 0x1a)
1233 nv_engine(priv)->sclass = nv15_graph_sclass;
1234 else
1235 nv_engine(priv)->sclass = nv17_graph_sclass;
1236
1237 nv_engine(priv)->tile_prog = nv10_graph_tile_prog;
1238 spin_lock_init(&priv->lock);
1239 return 0;
1240}
1241
/* Destroy the NV10 PGRAPH engine object; no engine-private resources
 * beyond the base graph object. */
static void
nv10_graph_dtor(struct nouveau_object *object)
{
	struct nv10_graph_priv *priv = (void *)object;
	nouveau_graph_destroy(&priv->base);
}
1248
/* Bring up the NV10 PGRAPH engine: ack/enable interrupts, load the DEBUG
 * register defaults, program the tiling regions, and reset the context
 * state so no channel is considered resident.  The magic values are the
 * hardware bring-up sequence inherited from the pre-rework driver; do not
 * reorder. */
static int
nv10_graph_init(struct nouveau_object *object)
{
	struct nouveau_engine *engine = nv_engine(object);
	struct nouveau_fb *pfb = nouveau_fb(object);
	struct nv10_graph_priv *priv = (void *)engine;
	int ret, i;

	ret = nouveau_graph_init(&priv->base);
	if (ret)
		return ret;

	/* ack any stale interrupts, then enable all sources */
	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
	/* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));

	if (nv_device(priv)->chipset >= 0x17) {
		/* nv17+ has extra DEBUG/state registers to set up */
		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
		nv_wr32(priv, 0x400a10, 0x03ff3fb6);
		nv_wr32(priv, 0x400838, 0x002f8684);
		nv_wr32(priv, 0x40083c, 0x00115f3f);
		nv_wr32(priv, 0x4006b0, 0x40000020);
	} else {
		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->tile.regions; i++)
		engine->tile_prog(engine, i);

	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
	nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);

	/* chid 0x1f == "no channel resident" */
	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
	return 0;
}
1297
/* Engine suspend/fini: nothing hardware-specific to do beyond the generic
 * graph engine teardown. */
static int
nv10_graph_fini(struct nouveau_object *object, bool suspend)
{
	struct nv10_graph_priv *priv = (void *)object;
	return nouveau_graph_fini(&priv->base, suspend);
}
1304
/* Top-level engine class for NV10 PGRAPH, registered with the device
 * object tree. */
struct nouveau_oclass
nv10_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x10),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv10_graph_ctor,
		.dtor = nv10_graph_dtor,
		.init = nv10_graph_init,
		.fini = nv10_graph_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
new file mode 100644
index 000000000000..8f3f619c4a78
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -0,0 +1,381 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/handle.h>
5#include <core/enum.h>
6
7#include <subdev/timer.h>
8#include <subdev/fb.h>
9
10#include <engine/graph.h>
11#include <engine/fifo.h>
12
13#include "nv20.h"
14#include "regs.h"
15
16/*******************************************************************************
17 * Graphics object classes
18 ******************************************************************************/
19
/* Graphics object classes exposed by NV20 PGRAPH.  All classes share the
 * NV04 object ofuncs; 0x0097 (kelvin) is the NV20 3D class. */
static struct nouveau_oclass
nv20_graph_sclass[] = {
	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
	{ 0x0096, &nv04_graph_ofuncs, NULL }, /* celcius */
	{ 0x0097, &nv04_graph_ofuncs, NULL }, /* kelvin */
	{ 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
	{},
};
39
40/*******************************************************************************
41 * PGRAPH context
42 ******************************************************************************/
43
/* Allocate and pre-fill an NV20 PGRAPH context image (0x37f0 bytes,
 * zero-initialized).  The nv_wo32() writes below lay down the hardware's
 * default context values at their fixed offsets; the offsets and values
 * are the known-good layout carried over from the previous driver and
 * must not be reordered or "simplified". */
static int
nv20_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv20_graph_chan *chan;
	int ret, i;

	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
					   0x37f0, 16, NVOBJ_FLAG_ZERO_ALLOC,
					   &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->chid = nouveau_fifo_chan(parent)->chid;

	/* word 0 tags the image with its owning channel id */
	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
	nv_wo32(chan, 0x033c, 0xffff0000);
	nv_wo32(chan, 0x03a0, 0x0fff0000);
	nv_wo32(chan, 0x03a4, 0x0fff0000);
	nv_wo32(chan, 0x047c, 0x00000101);
	nv_wo32(chan, 0x0490, 0x00000111);
	nv_wo32(chan, 0x04a8, 0x44400000);
	for (i = 0x04d4; i <= 0x04e0; i += 4)
		nv_wo32(chan, i, 0x00030303);
	for (i = 0x04f4; i <= 0x0500; i += 4)
		nv_wo32(chan, i, 0x00080000);
	for (i = 0x050c; i <= 0x0518; i += 4)
		nv_wo32(chan, i, 0x01012000);
	for (i = 0x051c; i <= 0x0528; i += 4)
		nv_wo32(chan, i, 0x000105b8);
	for (i = 0x052c; i <= 0x0538; i += 4)
		nv_wo32(chan, i, 0x00080008);
	for (i = 0x055c; i <= 0x0598; i += 4)
		nv_wo32(chan, i, 0x07ff0000);
	nv_wo32(chan, 0x05a4, 0x4b7fffff);
	nv_wo32(chan, 0x05fc, 0x00000001);
	nv_wo32(chan, 0x0604, 0x00004000);
	nv_wo32(chan, 0x0610, 0x00000001);
	nv_wo32(chan, 0x0618, 0x00040000);
	nv_wo32(chan, 0x061c, 0x00010000);
	/* per-unit 16-byte records; presumably texture unit state —
	 * TODO confirm against hw docs */
	for (i = 0x1c1c; i <= 0x248c; i += 16) {
		nv_wo32(chan, (i + 0), 0x10700ff9);
		nv_wo32(chan, (i + 4), 0x0436086c);
		nv_wo32(chan, (i + 8), 0x000c001b);
	}
	/* IEEE-754 float defaults (1.0f / 0.5f / 2.0f / -1.0f) */
	nv_wo32(chan, 0x281c, 0x3f800000);
	nv_wo32(chan, 0x2830, 0x3f800000);
	nv_wo32(chan, 0x285c, 0x40000000);
	nv_wo32(chan, 0x2860, 0x3f800000);
	nv_wo32(chan, 0x2864, 0x3f000000);
	nv_wo32(chan, 0x286c, 0x40000000);
	nv_wo32(chan, 0x2870, 0x3f800000);
	nv_wo32(chan, 0x2878, 0xbf800000);
	nv_wo32(chan, 0x2880, 0xbf800000);
	nv_wo32(chan, 0x34a4, 0x000fe000);
	nv_wo32(chan, 0x3530, 0x000003f8);
	nv_wo32(chan, 0x3540, 0x002fe000);
	for (i = 0x355c; i <= 0x3578; i += 4)
		nv_wo32(chan, i, 0x001c527c);
	return 0;
}
108
/* Make this channel's context visible to the hardware by writing its
 * instance address (in 16-byte units) into the PGRAPH context table slot
 * for its channel id. */
int
nv20_graph_context_init(struct nouveau_object *object)
{
	struct nv20_graph_priv *priv = (void *)object->engine;
	struct nv20_graph_chan *chan = (void *)object;
	int ret;

	ret = nouveau_graph_context_init(&chan->base);
	if (ret)
		return ret;

	nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
	return 0;
}
123
/* Tear down a channel's hardware context binding.  If the context is the
 * one currently resident in PGRAPH it is saved back to its image and the
 * "current channel" field reset to the invalid id, then the channel's
 * context-table slot is cleared. */
int
nv20_graph_context_fini(struct nouveau_object *object, bool suspend)
{
	struct nv20_graph_priv *priv = (void *)object->engine;
	struct nv20_graph_chan *chan = (void *)object;
	int chid = -1;	/* "no channel resident" until proven otherwise */

	/* halt PGRAPH fetch while we inspect/modify context state */
	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
	if (nv_rd32(priv, 0x400144) & 0x00010000)
		chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
	if (chan->chid == chid) {
		/* point the context-save engine at our image and trigger it */
		nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
		nv_wr32(priv, 0x400788, 0x00000002);
		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
		nv_wr32(priv, 0x400144, 0x10000000);
		nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
	}
	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);

	nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
	return nouveau_graph_context_fini(&chan->base, suspend);
}
146
/* Context class for NV20 PGRAPH.  Unlike nv10, the context lives in a
 * gpuobj, so the generic graph-context dtor and rd32/wr32 accessors are
 * used directly. */
static struct nouveau_oclass
nv20_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x20),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv20_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = nv20_graph_context_init,
		.fini = nv20_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
159
160/*******************************************************************************
161 * PGRAPH engine/subdev functions
162 ******************************************************************************/
163
/* Program PGRAPH's view of tiling region @i.  As on nv10, PFIFO is paused
 * and PGRAPH idled around the update; nv20 additionally mirrors the tile
 * setup into the RDI-accessible copies, and NV_20 cards also carry the
 * Z-compression configuration. */
void
nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
{
	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
	struct nv20_graph_priv *priv = (void *)engine;
	unsigned long flags;

	pfifo->pause(pfifo, &flags);
	nv04_graph_idle(priv);

	nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
	nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
	nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);

	/* mirror into the RDI copies used by the context switcher */
	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);
	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch);
	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);

	if (nv_device(engine)->card_type == NV_20) {
		nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
		nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
	}

	pfifo->start(pfifo, &flags);
}
194
195void
196nv20_graph_intr(struct nouveau_subdev *subdev)
197{
198 struct nouveau_engine *engine = nv_engine(subdev);
199 struct nouveau_object *engctx;
200 struct nouveau_handle *handle;
201 struct nv20_graph_priv *priv = (void *)subdev;
202 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
203 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
204 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
205 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
206 u32 chid = (addr & 0x01f00000) >> 20;
207 u32 subc = (addr & 0x00070000) >> 16;
208 u32 mthd = (addr & 0x00001ffc);
209 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
210 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
211 u32 show = stat;
212
213 engctx = nouveau_engctx_get(engine, chid);
214 if (stat & NV_PGRAPH_INTR_ERROR) {
215 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
216 handle = nouveau_handle_get_class(engctx, class);
217 if (handle && !nv_call(handle->object, mthd, data))
218 show &= ~NV_PGRAPH_INTR_ERROR;
219 nouveau_handle_put(handle);
220 }
221 }
222
223 nv_wr32(priv, NV03_PGRAPH_INTR, stat);
224 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
225
226 if (show) {
227 nv_info(priv, "");
228 nouveau_bitfield_print(nv10_graph_intr_name, show);
229 printk(" nsource:");
230 nouveau_bitfield_print(nv04_graph_nsource, nsource);
231 printk(" nstatus:");
232 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
233 printk("\n");
234 nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
235 chid, subc, class, mthd, data);
236 }
237
238 nouveau_engctx_put(engctx);
239}
240
/* Construct the NV20 PGRAPH engine: allocate the 32-entry channel context
 * table gpuobj and wire up the interrupt/class/tile callbacks. */
static int
nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv20_graph_priv *priv;
	int ret;

	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* one 32-bit context pointer per channel (32 channels max) */
	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00001000;
	nv_subdev(priv)->intr = nv20_graph_intr;
	nv_engine(priv)->cclass = &nv20_graph_cclass;
	nv_engine(priv)->sclass = nv20_graph_sclass;
	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
	return 0;
}
266
/* Destroy the NV20 PGRAPH engine: drop the context-table gpuobj reference
 * (NULL-safe, so this also handles a partially-constructed engine) and
 * tear down the base object. */
void
nv20_graph_dtor(struct nouveau_object *object)
{
	struct nv20_graph_priv *priv = (void *)object;
	nouveau_gpuobj_ref(NULL, &priv->ctxtab);
	nouveau_graph_destroy(&priv->base);
}
274
/* Bring up NV20-family PGRAPH: point the hardware at the channel context
 * table, run the chipset-specific RDI scrub, load DEBUG defaults, program
 * tiling regions, and configure the RAM/clip setup.  The magic constants
 * are the bring-up sequence inherited from the pre-rework driver; the
 * write order is significant. */
int
nv20_graph_init(struct nouveau_object *object)
{
	struct nouveau_engine *engine = nv_engine(object);
	struct nv20_graph_priv *priv = (void *)engine;
	struct nouveau_fb *pfb = nouveau_fb(object);
	u32 tmp, vramsz;
	int ret, i;

	ret = nouveau_graph_init(&priv->base);
	if (ret)
		return ret;

	/* tell PGRAPH where the per-channel context pointers live */
	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);

	/* clear some internal state through the RDI port; the index and
	 * count differ between nv20 and the later chips */
	if (nv_device(priv)->chipset == 0x20) {
		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
		for (i = 0; i < 15; i++)
			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
	} else {
		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
		for (i = 0; i < 32; i++)
			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
	}

	/* ack stale interrupts, enable all sources */
	nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
	nv_wr32(priv, 0x40009C           , 0x00000040);

	if (nv_device(priv)->chipset >= 0x25) {
		nv_wr32(priv, 0x400890, 0x00a8cfff);
		nv_wr32(priv, 0x400610, 0x304B1FB6);
		nv_wr32(priv, 0x400B80, 0x1cbd3883);
		nv_wr32(priv, 0x400B84, 0x44000000);
		nv_wr32(priv, 0x400098, 0x40000080);
		nv_wr32(priv, 0x400B88, 0x000000ff);

	} else {
		nv_wr32(priv, 0x400880, 0x0008c7df);
		nv_wr32(priv, 0x400094, 0x00000005);
		nv_wr32(priv, 0x400B80, 0x45eae20e);
		nv_wr32(priv, 0x400B84, 0x24000000);
		nv_wr32(priv, 0x400098, 0x00000040);
		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->tile.regions; i++)
		engine->tile_prog(engine, i);

	nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));
	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));

	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00;
	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);

	/* begin RAM config */
	vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200));
	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204));
	nv_wr32(priv, 0x400820, 0);
	nv_wr32(priv, 0x400824, 0);
	nv_wr32(priv, 0x400864, vramsz - 1);
	nv_wr32(priv, 0x400868, vramsz - 1);

	/* interesting.. the below overwrites some of the tile setup above.. */
	nv_wr32(priv, 0x400B20, 0x00000000);
	nv_wr32(priv, 0x400B04, 0xFFFFFFFF);

	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
	return 0;
}
371
/* Top-level engine class for NV20 PGRAPH; fini is the generic graph
 * engine fini since there is no per-suspend hardware work here. */
struct nouveau_oclass
nv20_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x20),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv20_graph_ctor,
		.dtor = nv20_graph_dtor,
		.init = nv20_graph_init,
		.fini = _nouveau_graph_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
new file mode 100644
index 000000000000..2bea7313e03f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
@@ -0,0 +1,31 @@
1#ifndef __NV20_GRAPH_H__
2#define __NV20_GRAPH_H__
3
4#include <core/enum.h>
5
6#include <engine/graph.h>
7#include <engine/fifo.h>
8
/* Engine-private state shared by the nv20/nv25/nv2a/nv3x PGRAPH drivers. */
struct nv20_graph_priv {
	struct nouveau_graph base;
	struct nouveau_gpuobj *ctxtab;	/* per-channel context pointer table */
};
13
/* Per-channel PGRAPH context; the gpuobj holding the context image is
 * part of the base graph-context object. */
struct nv20_graph_chan {
	struct nouveau_graph_chan base;
	int chid;	/* owning FIFO channel id */
};
18
19extern struct nouveau_oclass nv25_graph_sclass[];
20int nv20_graph_context_init(struct nouveau_object *);
21int nv20_graph_context_fini(struct nouveau_object *, bool);
22
23void nv20_graph_tile_prog(struct nouveau_engine *, int);
24void nv20_graph_intr(struct nouveau_subdev *);
25
26void nv20_graph_dtor(struct nouveau_object *);
27int nv20_graph_init(struct nouveau_object *);
28
29int nv30_graph_init(struct nouveau_object *);
30
31#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
new file mode 100644
index 000000000000..b2b650dd8b28
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -0,0 +1,167 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * Graphics object classes
16 ******************************************************************************/
17
/* Graphics object classes exposed by NV25 PGRAPH; differs from nv20 in
 * using the 0x0597 kelvin class.  Shared with nv2a, hence non-static. */
struct nouveau_oclass
nv25_graph_sclass[] = {
	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
	{ 0x0096, &nv04_graph_ofuncs, NULL }, /* celcius */
	{ 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
	{ 0x0597, &nv04_graph_ofuncs, NULL }, /* kelvin */
	{},
};
37
38/*******************************************************************************
39 * PGRAPH context
40 ******************************************************************************/
41
/* Allocate and pre-fill an NV25 PGRAPH context image (0x3724 bytes).
 * Same idea as nv20_graph_context_ctor but with the nv25 context layout;
 * offsets/values are the known-good defaults and must not be reordered. */
static int
nv25_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv20_graph_chan *chan;
	int ret, i;

	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x3724,
					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->chid = nouveau_fifo_chan(parent)->chid;

	/* tag the image with its owning channel id */
	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
	nv_wo32(chan, 0x035c, 0xffff0000);
	nv_wo32(chan, 0x03c0, 0x0fff0000);
	nv_wo32(chan, 0x03c4, 0x0fff0000);
	nv_wo32(chan, 0x049c, 0x00000101);
	nv_wo32(chan, 0x04b0, 0x00000111);
	nv_wo32(chan, 0x04c8, 0x00000080);
	nv_wo32(chan, 0x04cc, 0xffff0000);
	nv_wo32(chan, 0x04d0, 0x00000001);
	nv_wo32(chan, 0x04e4, 0x44400000);
	nv_wo32(chan, 0x04fc, 0x4b800000);
	for (i = 0x0510; i <= 0x051c; i += 4)
		nv_wo32(chan, i, 0x00030303);
	for (i = 0x0530; i <= 0x053c; i += 4)
		nv_wo32(chan, i, 0x00080000);
	for (i = 0x0548; i <= 0x0554; i += 4)
		nv_wo32(chan, i, 0x01012000);
	for (i = 0x0558; i <= 0x0564; i += 4)
		nv_wo32(chan, i, 0x000105b8);
	for (i = 0x0568; i <= 0x0574; i += 4)
		nv_wo32(chan, i, 0x00080008);
	for (i = 0x0598; i <= 0x05d4; i += 4)
		nv_wo32(chan, i, 0x07ff0000);
	nv_wo32(chan, 0x05e0, 0x4b7fffff);
	nv_wo32(chan, 0x0620, 0x00000080);
	nv_wo32(chan, 0x0624, 0x30201000);
	nv_wo32(chan, 0x0628, 0x70605040);
	nv_wo32(chan, 0x062c, 0xb0a09080);
	nv_wo32(chan, 0x0630, 0xf0e0d0c0);
	nv_wo32(chan, 0x0664, 0x00000001);
	nv_wo32(chan, 0x066c, 0x00004000);
	nv_wo32(chan, 0x0678, 0x00000001);
	nv_wo32(chan, 0x0680, 0x00040000);
	nv_wo32(chan, 0x0684, 0x00010000);
	/* per-unit 16-byte records, same pattern as nv20 */
	for (i = 0x1b04; i <= 0x2374; i += 16) {
		nv_wo32(chan, (i + 0), 0x10700ff9);
		nv_wo32(chan, (i + 4), 0x0436086c);
		nv_wo32(chan, (i + 8), 0x000c001b);
	}
	/* IEEE-754 float defaults */
	nv_wo32(chan, 0x2704, 0x3f800000);
	nv_wo32(chan, 0x2718, 0x3f800000);
	nv_wo32(chan, 0x2744, 0x40000000);
	nv_wo32(chan, 0x2748, 0x3f800000);
	nv_wo32(chan, 0x274c, 0x3f000000);
	nv_wo32(chan, 0x2754, 0x40000000);
	nv_wo32(chan, 0x2758, 0x3f800000);
	nv_wo32(chan, 0x2760, 0xbf800000);
	nv_wo32(chan, 0x2768, 0xbf800000);
	nv_wo32(chan, 0x308c, 0x000fe000);
	nv_wo32(chan, 0x3108, 0x000003f8);
	nv_wo32(chan, 0x3468, 0x002fe000);
	for (i = 0x3484; i <= 0x34a0; i += 4)
		nv_wo32(chan, i, 0x001c527c);
	return 0;
}
114
/* Context class for NV25 PGRAPH; only the ctor differs from nv20 (the
 * context image layout), the init/fini logic is shared. */
static struct nouveau_oclass
nv25_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x25),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv25_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = nv20_graph_context_init,
		.fini = nv20_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
127
128/*******************************************************************************
129 * PGRAPH engine/subdev functions
130 ******************************************************************************/
131
/* Construct the NV25 PGRAPH engine; identical to nv20_graph_ctor apart
 * from the nv25 context/object class lists. */
static int
nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv20_graph_priv *priv;
	int ret;

	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* one 32-bit context pointer per channel */
	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00001000;
	nv_subdev(priv)->intr = nv20_graph_intr;
	nv_engine(priv)->cclass = &nv25_graph_cclass;
	nv_engine(priv)->sclass = nv25_graph_sclass;
	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
	return 0;
}
157
/* Top-level engine class for NV25 PGRAPH; dtor/init/fini are shared with
 * nv20. */
struct nouveau_oclass
nv25_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x25),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv25_graph_ctor,
		.dtor = nv20_graph_dtor,
		.init = nv20_graph_init,
		.fini = _nouveau_graph_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
new file mode 100644
index 000000000000..700462fa0ae0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -0,0 +1,134 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * PGRAPH context
16 ******************************************************************************/
17
/* Allocate and pre-fill an NV2A (Xbox GPU) PGRAPH context image
 * (0x36b0 bytes).  Layout matches nv20 up to offset 0x061c; the tail
 * offsets differ.  Values are the known-good defaults; do not reorder. */
static int
nv2a_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv20_graph_chan *chan;
	int ret, i;

	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x36b0,
					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->chid = nouveau_fifo_chan(parent)->chid;

	/* tag the image with its owning channel id */
	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
	nv_wo32(chan, 0x033c, 0xffff0000);
	nv_wo32(chan, 0x03a0, 0x0fff0000);
	nv_wo32(chan, 0x03a4, 0x0fff0000);
	nv_wo32(chan, 0x047c, 0x00000101);
	nv_wo32(chan, 0x0490, 0x00000111);
	nv_wo32(chan, 0x04a8, 0x44400000);
	for (i = 0x04d4; i <= 0x04e0; i += 4)
		nv_wo32(chan, i, 0x00030303);
	for (i = 0x04f4; i <= 0x0500; i += 4)
		nv_wo32(chan, i, 0x00080000);
	for (i = 0x050c; i <= 0x0518; i += 4)
		nv_wo32(chan, i, 0x01012000);
	for (i = 0x051c; i <= 0x0528; i += 4)
		nv_wo32(chan, i, 0x000105b8);
	for (i = 0x052c; i <= 0x0538; i += 4)
		nv_wo32(chan, i, 0x00080008);
	for (i = 0x055c; i <= 0x0598; i += 4)
		nv_wo32(chan, i, 0x07ff0000);
	nv_wo32(chan, 0x05a4, 0x4b7fffff);
	nv_wo32(chan, 0x05fc, 0x00000001);
	nv_wo32(chan, 0x0604, 0x00004000);
	nv_wo32(chan, 0x0610, 0x00000001);
	nv_wo32(chan, 0x0618, 0x00040000);
	nv_wo32(chan, 0x061c, 0x00010000);
	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
		nv_wo32(chan, (i + 0), 0x10700ff9);
		nv_wo32(chan, (i + 4), 0x0436086c);
		nv_wo32(chan, (i + 8), 0x000c001b);
	}
	/* IEEE-754 float defaults */
	nv_wo32(chan, 0x269c, 0x3f800000);
	nv_wo32(chan, 0x26b0, 0x3f800000);
	nv_wo32(chan, 0x26dc, 0x40000000);
	nv_wo32(chan, 0x26e0, 0x3f800000);
	nv_wo32(chan, 0x26e4, 0x3f000000);
	nv_wo32(chan, 0x26ec, 0x40000000);
	nv_wo32(chan, 0x26f0, 0x3f800000);
	nv_wo32(chan, 0x26f8, 0xbf800000);
	nv_wo32(chan, 0x2700, 0xbf800000);
	nv_wo32(chan, 0x3024, 0x000fe000);
	nv_wo32(chan, 0x30a0, 0x000003f8);
	nv_wo32(chan, 0x33fc, 0x002fe000);
	for (i = 0x341c; i <= 0x3438; i += 4)
		nv_wo32(chan, i, 0x001c527c);
	return 0;
}
81
/* Context class for NV2A PGRAPH; only the ctor (context image layout)
 * differs from nv20. */
static struct nouveau_oclass
nv2a_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x2a),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv2a_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = nv20_graph_context_init,
		.fini = nv20_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
94
95/*******************************************************************************
96 * PGRAPH engine/subdev functions
97 ******************************************************************************/
98
/* Construct the NV2A PGRAPH engine; uses the nv25 object-class list and
 * the shared nv20 interrupt/tile handlers. */
static int
nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv20_graph_priv *priv;
	int ret;

	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* one 32-bit context pointer per channel */
	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00001000;
	nv_subdev(priv)->intr = nv20_graph_intr;
	nv_engine(priv)->cclass = &nv2a_graph_cclass;
	nv_engine(priv)->sclass = nv25_graph_sclass;
	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
	return 0;
}
124
/* Top-level engine class for NV2A PGRAPH; dtor/init/fini are shared with
 * nv20. */
struct nouveau_oclass
nv2a_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x2a),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv2a_graph_ctor,
		.dtor = nv20_graph_dtor,
		.init = nv20_graph_init,
		.fini = _nouveau_graph_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
new file mode 100644
index 000000000000..cedadaa92d3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -0,0 +1,238 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * Graphics object classes
16 ******************************************************************************/
17
18static struct nouveau_oclass
19nv30_graph_sclass[] = {
20 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
21 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
22 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
23 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
24 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
25 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
26 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
27 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
28 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
29 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
30 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
31 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
32 { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
33 { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
34 { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
35 { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
36 { 0x0397, &nv04_graph_ofuncs, NULL }, /* rankine */
37 {},
38};
39
40/*******************************************************************************
41 * PGRAPH context
42 ******************************************************************************/
43
44static int
45nv30_graph_context_ctor(struct nouveau_object *parent,
46 struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv20_graph_chan *chan;
51 int ret, i;
52
53 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x5f48,
54 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
55 *pobject = nv_object(chan);
56 if (ret)
57 return ret;
58
59 chan->chid = nouveau_fifo_chan(parent)->chid;
60
61 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
62 nv_wo32(chan, 0x0410, 0x00000101);
63 nv_wo32(chan, 0x0424, 0x00000111);
64 nv_wo32(chan, 0x0428, 0x00000060);
65 nv_wo32(chan, 0x0444, 0x00000080);
66 nv_wo32(chan, 0x0448, 0xffff0000);
67 nv_wo32(chan, 0x044c, 0x00000001);
68 nv_wo32(chan, 0x0460, 0x44400000);
69 nv_wo32(chan, 0x048c, 0xffff0000);
70 for (i = 0x04e0; i < 0x04e8; i += 4)
71 nv_wo32(chan, i, 0x0fff0000);
72 nv_wo32(chan, 0x04ec, 0x00011100);
73 for (i = 0x0508; i < 0x0548; i += 4)
74 nv_wo32(chan, i, 0x07ff0000);
75 nv_wo32(chan, 0x0550, 0x4b7fffff);
76 nv_wo32(chan, 0x058c, 0x00000080);
77 nv_wo32(chan, 0x0590, 0x30201000);
78 nv_wo32(chan, 0x0594, 0x70605040);
79 nv_wo32(chan, 0x0598, 0xb8a89888);
80 nv_wo32(chan, 0x059c, 0xf8e8d8c8);
81 nv_wo32(chan, 0x05b0, 0xb0000000);
82 for (i = 0x0600; i < 0x0640; i += 4)
83 nv_wo32(chan, i, 0x00010588);
84 for (i = 0x0640; i < 0x0680; i += 4)
85 nv_wo32(chan, i, 0x00030303);
86 for (i = 0x06c0; i < 0x0700; i += 4)
87 nv_wo32(chan, i, 0x0008aae4);
88 for (i = 0x0700; i < 0x0740; i += 4)
89 nv_wo32(chan, i, 0x01012000);
90 for (i = 0x0740; i < 0x0780; i += 4)
91 nv_wo32(chan, i, 0x00080008);
92 nv_wo32(chan, 0x085c, 0x00040000);
93 nv_wo32(chan, 0x0860, 0x00010000);
94 for (i = 0x0864; i < 0x0874; i += 4)
95 nv_wo32(chan, i, 0x00040004);
96 for (i = 0x1f18; i <= 0x3088 ; i += 16) {
97 nv_wo32(chan, i + 0, 0x10700ff9);
98 nv_wo32(chan, i + 1, 0x0436086c);
99 nv_wo32(chan, i + 2, 0x000c001b);
100 }
101 for (i = 0x30b8; i < 0x30c8; i += 4)
102 nv_wo32(chan, i, 0x0000ffff);
103 nv_wo32(chan, 0x344c, 0x3f800000);
104 nv_wo32(chan, 0x3808, 0x3f800000);
105 nv_wo32(chan, 0x381c, 0x3f800000);
106 nv_wo32(chan, 0x3848, 0x40000000);
107 nv_wo32(chan, 0x384c, 0x3f800000);
108 nv_wo32(chan, 0x3850, 0x3f000000);
109 nv_wo32(chan, 0x3858, 0x40000000);
110 nv_wo32(chan, 0x385c, 0x3f800000);
111 nv_wo32(chan, 0x3864, 0xbf800000);
112 nv_wo32(chan, 0x386c, 0xbf800000);
113 return 0;
114}
115
116static struct nouveau_oclass
117nv30_graph_cclass = {
118 .handle = NV_ENGCTX(GR, 0x30),
119 .ofuncs = &(struct nouveau_ofuncs) {
120 .ctor = nv30_graph_context_ctor,
121 .dtor = _nouveau_graph_context_dtor,
122 .init = nv20_graph_context_init,
123 .fini = nv20_graph_context_fini,
124 .rd32 = _nouveau_graph_context_rd32,
125 .wr32 = _nouveau_graph_context_wr32,
126 },
127};
128
129/*******************************************************************************
130 * PGRAPH engine/subdev functions
131 ******************************************************************************/
132
133static int
134nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, void *data, u32 size,
136 struct nouveau_object **pobject)
137{
138 struct nv20_graph_priv *priv;
139 int ret;
140
141 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
142 *pobject = nv_object(priv);
143 if (ret)
144 return ret;
145
146 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
148 if (ret)
149 return ret;
150
151 nv_subdev(priv)->unit = 0x00001000;
152 nv_subdev(priv)->intr = nv20_graph_intr;
153 nv_engine(priv)->cclass = &nv30_graph_cclass;
154 nv_engine(priv)->sclass = nv30_graph_sclass;
155 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
156 return 0;
157}
158
159int
160nv30_graph_init(struct nouveau_object *object)
161{
162 struct nouveau_engine *engine = nv_engine(object);
163 struct nv20_graph_priv *priv = (void *)engine;
164 struct nouveau_fb *pfb = nouveau_fb(object);
165 int ret, i;
166
167 ret = nouveau_graph_init(&priv->base);
168 if (ret)
169 return ret;
170
171 nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
172
173 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
174 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
175
176 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
177 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
178 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
179 nv_wr32(priv, 0x400890, 0x01b463ff);
180 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
181 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
182 nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
183 nv_wr32(priv, 0x400B80, 0x1003d888);
184 nv_wr32(priv, 0x400B84, 0x0c000000);
185 nv_wr32(priv, 0x400098, 0x00000000);
186 nv_wr32(priv, 0x40009C, 0x0005ad00);
187 nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
188 nv_wr32(priv, 0x4000a0, 0x00000000);
189 nv_wr32(priv, 0x4000a4, 0x00000008);
190 nv_wr32(priv, 0x4008a8, 0xb784a400);
191 nv_wr32(priv, 0x400ba0, 0x002f8685);
192 nv_wr32(priv, 0x400ba4, 0x00231f3f);
193 nv_wr32(priv, 0x4008a4, 0x40000020);
194
195 if (nv_device(priv)->chipset == 0x34) {
196 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
197 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
198 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
199 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
200 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
201 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
202 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
203 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
204 }
205
206 nv_wr32(priv, 0x4000c0, 0x00000016);
207
208 /* Turn all the tiling regions off. */
209 for (i = 0; i < pfb->tile.regions; i++)
210 engine->tile_prog(engine, i);
211
212 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
213 nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
214 nv_wr32(priv, 0x0040075c , 0x00000001);
215
216 /* begin RAM config */
217 /* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
218 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
219 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
220 if (nv_device(priv)->chipset != 0x34) {
221 nv_wr32(priv, 0x400750, 0x00EA0000);
222 nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
223 nv_wr32(priv, 0x400750, 0x00EA0004);
224 nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
225 }
226 return 0;
227}
228
229struct nouveau_oclass
230nv30_graph_oclass = {
231 .handle = NV_ENGINE(GR, 0x30),
232 .ofuncs = &(struct nouveau_ofuncs) {
233 .ctor = nv30_graph_ctor,
234 .dtor = nv20_graph_dtor,
235 .init = nv30_graph_init,
236 .fini = _nouveau_graph_fini,
237 },
238};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
new file mode 100644
index 000000000000..273f6320027b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -0,0 +1,168 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * Graphics object classes
16 ******************************************************************************/
17
18static struct nouveau_oclass
19nv34_graph_sclass[] = {
20 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
21 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
22 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
23 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
24 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
25 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
26 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
27 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
28 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
29 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
30 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
31 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
32 { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
33 { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
34 { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
35 { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
36 { 0x0697, &nv04_graph_ofuncs, NULL }, /* rankine */
37 {},
38};
39
40/*******************************************************************************
41 * PGRAPH context
42 ******************************************************************************/
43
44static int
45nv34_graph_context_ctor(struct nouveau_object *parent,
46 struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv20_graph_chan *chan;
51 int ret, i;
52
53 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x46dc,
54 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
55 *pobject = nv_object(chan);
56 if (ret)
57 return ret;
58
59 chan->chid = nouveau_fifo_chan(parent)->chid;
60
61 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
62 nv_wo32(chan, 0x040c, 0x01000101);
63 nv_wo32(chan, 0x0420, 0x00000111);
64 nv_wo32(chan, 0x0424, 0x00000060);
65 nv_wo32(chan, 0x0440, 0x00000080);
66 nv_wo32(chan, 0x0444, 0xffff0000);
67 nv_wo32(chan, 0x0448, 0x00000001);
68 nv_wo32(chan, 0x045c, 0x44400000);
69 nv_wo32(chan, 0x0480, 0xffff0000);
70 for (i = 0x04d4; i < 0x04dc; i += 4)
71 nv_wo32(chan, i, 0x0fff0000);
72 nv_wo32(chan, 0x04e0, 0x00011100);
73 for (i = 0x04fc; i < 0x053c; i += 4)
74 nv_wo32(chan, i, 0x07ff0000);
75 nv_wo32(chan, 0x0544, 0x4b7fffff);
76 nv_wo32(chan, 0x057c, 0x00000080);
77 nv_wo32(chan, 0x0580, 0x30201000);
78 nv_wo32(chan, 0x0584, 0x70605040);
79 nv_wo32(chan, 0x0588, 0xb8a89888);
80 nv_wo32(chan, 0x058c, 0xf8e8d8c8);
81 nv_wo32(chan, 0x05a0, 0xb0000000);
82 for (i = 0x05f0; i < 0x0630; i += 4)
83 nv_wo32(chan, i, 0x00010588);
84 for (i = 0x0630; i < 0x0670; i += 4)
85 nv_wo32(chan, i, 0x00030303);
86 for (i = 0x06b0; i < 0x06f0; i += 4)
87 nv_wo32(chan, i, 0x0008aae4);
88 for (i = 0x06f0; i < 0x0730; i += 4)
89 nv_wo32(chan, i, 0x01012000);
90 for (i = 0x0730; i < 0x0770; i += 4)
91 nv_wo32(chan, i, 0x00080008);
92 nv_wo32(chan, 0x0850, 0x00040000);
93 nv_wo32(chan, 0x0854, 0x00010000);
94 for (i = 0x0858; i < 0x0868; i += 4)
95 nv_wo32(chan, i, 0x00040004);
96 for (i = 0x15ac; i <= 0x271c ; i += 16) {
97 nv_wo32(chan, i + 0, 0x10700ff9);
98 nv_wo32(chan, i + 1, 0x0436086c);
99 nv_wo32(chan, i + 2, 0x000c001b);
100 }
101 for (i = 0x274c; i < 0x275c; i += 4)
102 nv_wo32(chan, i, 0x0000ffff);
103 nv_wo32(chan, 0x2ae0, 0x3f800000);
104 nv_wo32(chan, 0x2e9c, 0x3f800000);
105 nv_wo32(chan, 0x2eb0, 0x3f800000);
106 nv_wo32(chan, 0x2edc, 0x40000000);
107 nv_wo32(chan, 0x2ee0, 0x3f800000);
108 nv_wo32(chan, 0x2ee4, 0x3f000000);
109 nv_wo32(chan, 0x2eec, 0x40000000);
110 nv_wo32(chan, 0x2ef0, 0x3f800000);
111 nv_wo32(chan, 0x2ef8, 0xbf800000);
112 nv_wo32(chan, 0x2f00, 0xbf800000);
113 return 0;
114}
115
116static struct nouveau_oclass
117nv34_graph_cclass = {
118 .handle = NV_ENGCTX(GR, 0x34),
119 .ofuncs = &(struct nouveau_ofuncs) {
120 .ctor = nv34_graph_context_ctor,
121 .dtor = _nouveau_graph_context_dtor,
122 .init = nv20_graph_context_init,
123 .fini = nv20_graph_context_fini,
124 .rd32 = _nouveau_graph_context_rd32,
125 .wr32 = _nouveau_graph_context_wr32,
126 },
127};
128
129/*******************************************************************************
130 * PGRAPH engine/subdev functions
131 ******************************************************************************/
132
133static int
134nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, void *data, u32 size,
136 struct nouveau_object **pobject)
137{
138 struct nv20_graph_priv *priv;
139 int ret;
140
141 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
142 *pobject = nv_object(priv);
143 if (ret)
144 return ret;
145
146 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
148 if (ret)
149 return ret;
150
151 nv_subdev(priv)->unit = 0x00001000;
152 nv_subdev(priv)->intr = nv20_graph_intr;
153 nv_engine(priv)->cclass = &nv34_graph_cclass;
154 nv_engine(priv)->sclass = nv34_graph_sclass;
155 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
156 return 0;
157}
158
159struct nouveau_oclass
160nv34_graph_oclass = {
161 .handle = NV_ENGINE(GR, 0x34),
162 .ofuncs = &(struct nouveau_ofuncs) {
163 .ctor = nv34_graph_ctor,
164 .dtor = nv20_graph_dtor,
165 .init = nv30_graph_init,
166 .fini = _nouveau_graph_fini,
167 },
168};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
new file mode 100644
index 000000000000..f40ee2116ee1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -0,0 +1,166 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include "nv20.h"
10#include "regs.h"
11
12/*******************************************************************************
13 * Graphics object classes
14 ******************************************************************************/
15
16static struct nouveau_oclass
17nv35_graph_sclass[] = {
18 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
19 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
20 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
21 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
22 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
23 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
24 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
25 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
26 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
27 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
28 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
29 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
30 { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
31 { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
32 { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
33 { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
34 { 0x0497, &nv04_graph_ofuncs, NULL }, /* rankine */
35 {},
36};
37
38/*******************************************************************************
39 * PGRAPH context
40 ******************************************************************************/
41
42static int
43nv35_graph_context_ctor(struct nouveau_object *parent,
44 struct nouveau_object *engine,
45 struct nouveau_oclass *oclass, void *data, u32 size,
46 struct nouveau_object **pobject)
47{
48 struct nv20_graph_chan *chan;
49 int ret, i;
50
51 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x577c,
52 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
53 *pobject = nv_object(chan);
54 if (ret)
55 return ret;
56
57 chan->chid = nouveau_fifo_chan(parent)->chid;
58
59 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
60 nv_wo32(chan, 0x040c, 0x00000101);
61 nv_wo32(chan, 0x0420, 0x00000111);
62 nv_wo32(chan, 0x0424, 0x00000060);
63 nv_wo32(chan, 0x0440, 0x00000080);
64 nv_wo32(chan, 0x0444, 0xffff0000);
65 nv_wo32(chan, 0x0448, 0x00000001);
66 nv_wo32(chan, 0x045c, 0x44400000);
67 nv_wo32(chan, 0x0488, 0xffff0000);
68 for (i = 0x04dc; i < 0x04e4; i += 4)
69 nv_wo32(chan, i, 0x0fff0000);
70 nv_wo32(chan, 0x04e8, 0x00011100);
71 for (i = 0x0504; i < 0x0544; i += 4)
72 nv_wo32(chan, i, 0x07ff0000);
73 nv_wo32(chan, 0x054c, 0x4b7fffff);
74 nv_wo32(chan, 0x0588, 0x00000080);
75 nv_wo32(chan, 0x058c, 0x30201000);
76 nv_wo32(chan, 0x0590, 0x70605040);
77 nv_wo32(chan, 0x0594, 0xb8a89888);
78 nv_wo32(chan, 0x0598, 0xf8e8d8c8);
79 nv_wo32(chan, 0x05ac, 0xb0000000);
80 for (i = 0x0604; i < 0x0644; i += 4)
81 nv_wo32(chan, i, 0x00010588);
82 for (i = 0x0644; i < 0x0684; i += 4)
83 nv_wo32(chan, i, 0x00030303);
84 for (i = 0x06c4; i < 0x0704; i += 4)
85 nv_wo32(chan, i, 0x0008aae4);
86 for (i = 0x0704; i < 0x0744; i += 4)
87 nv_wo32(chan, i, 0x01012000);
88 for (i = 0x0744; i < 0x0784; i += 4)
89 nv_wo32(chan, i, 0x00080008);
90 nv_wo32(chan, 0x0860, 0x00040000);
91 nv_wo32(chan, 0x0864, 0x00010000);
92 for (i = 0x0868; i < 0x0878; i += 4)
93 nv_wo32(chan, i, 0x00040004);
94 for (i = 0x1f1c; i <= 0x308c ; i += 16) {
95 nv_wo32(chan, i + 0, 0x10700ff9);
96 nv_wo32(chan, i + 4, 0x0436086c);
97 nv_wo32(chan, i + 8, 0x000c001b);
98 }
99 for (i = 0x30bc; i < 0x30cc; i += 4)
100 nv_wo32(chan, i, 0x0000ffff);
101 nv_wo32(chan, 0x3450, 0x3f800000);
102 nv_wo32(chan, 0x380c, 0x3f800000);
103 nv_wo32(chan, 0x3820, 0x3f800000);
104 nv_wo32(chan, 0x384c, 0x40000000);
105 nv_wo32(chan, 0x3850, 0x3f800000);
106 nv_wo32(chan, 0x3854, 0x3f000000);
107 nv_wo32(chan, 0x385c, 0x40000000);
108 nv_wo32(chan, 0x3860, 0x3f800000);
109 nv_wo32(chan, 0x3868, 0xbf800000);
110 nv_wo32(chan, 0x3870, 0xbf800000);
111 return 0;
112}
113
114static struct nouveau_oclass
115nv35_graph_cclass = {
116 .handle = NV_ENGCTX(GR, 0x35),
117 .ofuncs = &(struct nouveau_ofuncs) {
118 .ctor = nv35_graph_context_ctor,
119 .dtor = _nouveau_graph_context_dtor,
120 .init = nv20_graph_context_init,
121 .fini = nv20_graph_context_fini,
122 .rd32 = _nouveau_graph_context_rd32,
123 .wr32 = _nouveau_graph_context_wr32,
124 },
125};
126
127/*******************************************************************************
128 * PGRAPH engine/subdev functions
129 ******************************************************************************/
130
131static int
132nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
133 struct nouveau_oclass *oclass, void *data, u32 size,
134 struct nouveau_object **pobject)
135{
136 struct nv20_graph_priv *priv;
137 int ret;
138
139 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
140 *pobject = nv_object(priv);
141 if (ret)
142 return ret;
143
144 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
145 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
146 if (ret)
147 return ret;
148
149 nv_subdev(priv)->unit = 0x00001000;
150 nv_subdev(priv)->intr = nv20_graph_intr;
151 nv_engine(priv)->cclass = &nv35_graph_cclass;
152 nv_engine(priv)->sclass = nv35_graph_sclass;
153 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
154 return 0;
155}
156
157struct nouveau_oclass
158nv35_graph_oclass = {
159 .handle = NV_ENGINE(GR, 0x35),
160 .ofuncs = &(struct nouveau_ofuncs) {
161 .ctor = nv35_graph_ctor,
162 .dtor = nv20_graph_dtor,
163 .init = nv30_graph_init,
164 .fini = _nouveau_graph_fini,
165 },
166};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
new file mode 100644
index 000000000000..8d0021049ec0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -0,0 +1,495 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/handle.h>
28#include <core/engctx.h>
29
30#include <subdev/fb.h>
31#include <subdev/timer.h>
32
33#include <engine/graph.h>
34#include <engine/fifo.h>
35
36#include "nv40.h"
37#include "regs.h"
38
39struct nv40_graph_priv {
40 struct nouveau_graph base;
41 u32 size;
42};
43
44struct nv40_graph_chan {
45 struct nouveau_graph_chan base;
46};
47
48/*******************************************************************************
49 * Graphics object classes
50 ******************************************************************************/
51
52static int
53nv40_graph_object_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nouveau_gpuobj *obj;
59 int ret;
60
61 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
62 20, 16, 0, &obj);
63 *pobject = nv_object(obj);
64 if (ret)
65 return ret;
66
67 nv_wo32(obj, 0x00, nv_mclass(obj));
68 nv_wo32(obj, 0x04, 0x00000000);
69 nv_wo32(obj, 0x08, 0x00000000);
70#ifdef __BIG_ENDIAN
71 nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
72#endif
73 nv_wo32(obj, 0x0c, 0x00000000);
74 nv_wo32(obj, 0x10, 0x00000000);
75 return 0;
76}
77
78static struct nouveau_ofuncs
79nv40_graph_ofuncs = {
80 .ctor = nv40_graph_object_ctor,
81 .dtor = _nouveau_gpuobj_dtor,
82 .init = _nouveau_gpuobj_init,
83 .fini = _nouveau_gpuobj_fini,
84 .rd32 = _nouveau_gpuobj_rd32,
85 .wr32 = _nouveau_gpuobj_wr32,
86};
87
88static struct nouveau_oclass
89nv40_graph_sclass[] = {
90 { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
91 { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
92 { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
93 { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
94 { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
95 { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
96 { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
97 { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
98 { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
99 { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
100 { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
101 { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
102 { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
103 { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
104 { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
105 { 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */
106 {},
107};
108
109static struct nouveau_oclass
110nv44_graph_sclass[] = {
111 { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
112 { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
113 { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
114 { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
115 { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
116 { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
117 { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
118 { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
119 { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
120 { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
121 { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
122 { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
123 { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
124 { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
125 { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
126 { 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */
127 {},
128};
129
130/*******************************************************************************
131 * PGRAPH context
132 ******************************************************************************/
133
134static int
135nv40_graph_context_ctor(struct nouveau_object *parent,
136 struct nouveau_object *engine,
137 struct nouveau_oclass *oclass, void *data, u32 size,
138 struct nouveau_object **pobject)
139{
140 struct nv40_graph_priv *priv = (void *)engine;
141 struct nv40_graph_chan *chan;
142 int ret;
143
144 ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
145 priv->size, 16,
146 NVOBJ_FLAG_ZERO_ALLOC, &chan);
147 *pobject = nv_object(chan);
148 if (ret)
149 return ret;
150
151 nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
152 nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
153 return 0;
154}
155
156static int
157nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
158{
159 struct nv04_graph_priv *priv = (void *)object->engine;
160 struct nv04_graph_chan *chan = (void *)object;
161 u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
162 int ret = 0;
163
164 nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
165
166 if (nv_rd32(priv, 0x40032c) == inst) {
167 if (suspend) {
168 nv_wr32(priv, 0x400720, 0x00000000);
169 nv_wr32(priv, 0x400784, inst);
170 nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
171 nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
172 if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
173 u32 insn = nv_rd32(priv, 0x400308);
174 nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
175 ret = -EBUSY;
176 }
177 }
178
179 nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
180 }
181
182 if (nv_rd32(priv, 0x400330) == inst)
183 nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
184
185 nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
186 return ret;
187}
188
189static struct nouveau_oclass
190nv40_graph_cclass = {
191 .handle = NV_ENGCTX(GR, 0x40),
192 .ofuncs = &(struct nouveau_ofuncs) {
193 .ctor = nv40_graph_context_ctor,
194 .dtor = _nouveau_graph_context_dtor,
195 .init = _nouveau_graph_context_init,
196 .fini = nv40_graph_context_fini,
197 .rd32 = _nouveau_graph_context_rd32,
198 .wr32 = _nouveau_graph_context_wr32,
199 },
200};
201
202/*******************************************************************************
203 * PGRAPH engine/subdev functions
204 ******************************************************************************/
205
206static void
207nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
208{
209 struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
210 struct nouveau_fifo *pfifo = nouveau_fifo(engine);
211 struct nv40_graph_priv *priv = (void *)engine;
212 unsigned long flags;
213
214 pfifo->pause(pfifo, &flags);
215 nv04_graph_idle(priv);
216
217 switch (nv_device(priv)->chipset) {
218 case 0x40:
219 case 0x41: /* guess */
220 case 0x42:
221 case 0x43:
222 case 0x45: /* guess */
223 case 0x4e:
224 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
225 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
226 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
227 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
228 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
229 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
230 break;
231 case 0x44:
232 case 0x4a:
233 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
234 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
235 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
236 break;
237 case 0x46:
238 case 0x47:
239 case 0x49:
240 case 0x4b:
241 case 0x4c:
242 case 0x67:
243 default:
244 nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
245 nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
246 nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
247 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
248 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
249 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
250 break;
251 }
252
253 pfifo->start(pfifo, &flags);
254}
255
256static void
257nv40_graph_intr(struct nouveau_subdev *subdev)
258{
259 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
260 struct nouveau_engine *engine = nv_engine(subdev);
261 struct nouveau_object *engctx;
262 struct nouveau_handle *handle = NULL;
263 struct nv40_graph_priv *priv = (void *)subdev;
264 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
265 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
266 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
267 u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff;
268 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
269 u32 subc = (addr & 0x00070000) >> 16;
270 u32 mthd = (addr & 0x00001ffc);
271 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
272 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
273 u32 show = stat;
274 int chid;
275
276 engctx = nouveau_engctx_get(engine, inst);
277 chid = pfifo->chid(pfifo, engctx);
278
279 if (stat & NV_PGRAPH_INTR_ERROR) {
280 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
281 handle = nouveau_handle_get_class(engctx, class);
282 if (handle && !nv_call(handle->object, mthd, data))
283 show &= ~NV_PGRAPH_INTR_ERROR;
284 nouveau_handle_put(handle);
285 }
286
287 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
288 nv_mask(priv, 0x402000, 0, 0);
289 }
290 }
291
292 nv_wr32(priv, NV03_PGRAPH_INTR, stat);
293 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
294
295 if (show) {
296 nv_info(priv, "");
297 nouveau_bitfield_print(nv10_graph_intr_name, show);
298 printk(" nsource:");
299 nouveau_bitfield_print(nv04_graph_nsource, nsource);
300 printk(" nstatus:");
301 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
302 printk("\n");
303 nv_error(priv, "ch %d [0x%08x] subc %d class 0x%04x "
304 "mthd 0x%04x data 0x%08x\n",
305 chid, inst << 4, subc, class, mthd, data);
306 }
307
308 nouveau_engctx_put(engctx);
309}
310
311static int
312nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
313 struct nouveau_oclass *oclass, void *data, u32 size,
314 struct nouveau_object **pobject)
315{
316 struct nv40_graph_priv *priv;
317 int ret;
318
319 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
320 *pobject = nv_object(priv);
321 if (ret)
322 return ret;
323
324 nv_subdev(priv)->unit = 0x00001000;
325 nv_subdev(priv)->intr = nv40_graph_intr;
326 nv_engine(priv)->cclass = &nv40_graph_cclass;
327 if (nv44_graph_class(priv))
328 nv_engine(priv)->sclass = nv44_graph_sclass;
329 else
330 nv_engine(priv)->sclass = nv40_graph_sclass;
331 nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
332 return 0;
333}
334
/* One-time hardware bring-up of nv40-family PGRAPH: uploads the context
 * program, programs debug/tiling/RAM-config registers and enables
 * interrupts.  The exact write order is hardware-mandated; do not reorder. */
static int
nv40_graph_init(struct nouveau_object *object)
{
	struct nouveau_engine *engine = nv_engine(object);
	struct nouveau_fb *pfb = nouveau_fb(object);
	struct nv40_graph_priv *priv = (void *)engine;
	int ret, i, j;
	u32 vramsz;

	ret = nouveau_graph_init(&priv->base);
	if (ret)
		return ret;

	/* generate and upload context program; also records its size */
	nv40_grctx_init(nv_device(priv), &priv->size);

	/* No context present currently */
	nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	/* ack all pending interrupts, then unmask everything */
	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	/* debug register defaults; magic values inherited from the original
	 * nouveau bring-up -- individual bit meanings not documented here */
	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	/* 0x1540 holds a unit-present mask; write the index of its lowest
	 * set bit to 0x405000 (purpose of that register not documented) */
	j = nv_rd32(priv, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nv_wr32(priv, 0x405000, i);
	}

	/* chipset-dependent setup: the register pair sits at 0x4009b0 on
	 * plain nv40, at 0x400820 on everything later */
	if (nv_device(priv)->chipset == 0x40) {
		nv_wr32(priv, 0x4009b0, 0x83280fff);
		nv_wr32(priv, 0x4009b4, 0x000000a0);
	} else {
		nv_wr32(priv, 0x400820, 0x83280eff);
		nv_wr32(priv, 0x400824, 0x000000a0);
	}

	/* more per-chipset magic values (registers undocumented) */
	switch (nv_device(priv)->chipset) {
	case 0x40:
	case 0x45:
		nv_wr32(priv, 0x4009b8, 0x0078e366);
		nv_wr32(priv, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nv_wr32(priv, 0x400828, 0x007596ff);
		nv_wr32(priv, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nv_wr32(priv, 0x400828, 0x0072cb77);
		nv_wr32(priv, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nv_wr32(priv, 0x400860, 0);
		nv_wr32(priv, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nv_wr32(priv, 0x400828, 0x07830610);
		nv_wr32(priv, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nv_wr32(priv, 0x400b38, 0x2ffff800);
	nv_wr32(priv, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (nv_device(priv)->chipset) {
	case 0x44:
	case 0x4a:
		nv_wr32(priv, 0x400bc4, 0x1003d888);
		nv_wr32(priv, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nv_wr32(priv, 0x400bc4, 0x0000e024);
		nv_wr32(priv, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nv_wr32(priv, 0x400bc4, 0x1003d888);
		nv_wr32(priv, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->tile.regions; i++)
		engine->tile_prog(engine, i);

	/* begin RAM config -- vramsz derives from BAR0 length; copy the
	 * values read at 0x100200/0x100204 into PGRAPH-local registers */
	vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
	switch (nv_device(priv)->chipset) {
	case 0x40:
		nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
		nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
		nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
		nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
		nv_wr32(priv, 0x400820, 0);
		nv_wr32(priv, 0x400824, 0);
		nv_wr32(priv, 0x400864, vramsz);
		nv_wr32(priv, 0x400868, vramsz);
		break;
	default:
		/* destination offsets differ again within this group */
		switch (nv_device(priv)->chipset) {
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x45:
		case 0x4e:
		case 0x44:
		case 0x4a:
			nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
			nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
			break;
		default:
			nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
			nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
			break;
		}
		nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
		nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
		nv_wr32(priv, 0x400840, 0);
		nv_wr32(priv, 0x400844, 0);
		nv_wr32(priv, 0x4008A0, vramsz);
		nv_wr32(priv, 0x4008A4, vramsz);
		break;
	}

	return 0;
}
485
/* Engine class description registered for nv40-family PGRAPH; teardown
 * and fini use the generic graph helpers. */
struct nouveau_oclass
nv40_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_graph_ctor,
		.dtor = _nouveau_graph_dtor,
		.init = nv40_graph_init,
		.fini = _nouveau_graph_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
new file mode 100644
index 000000000000..d2ac975afc2e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -0,0 +1,21 @@
#ifndef __NV40_GRAPH_H__
#define __NV40_GRAPH_H__

/* returns 1 if device is one of the nv4x using the 0x4497 object class,
 * helpful to determine a number of other hardware features
 */
static inline int
nv44_graph_class(void *priv)
{
	struct nouveau_device *device = nv_device(priv);

	/* all nv6x chipsets belong to the 0x4497 family */
	if ((device->chipset & 0xf0) == 0x60)
		return 1;

	/* for nv4x: bit N set in 0x0baf means chipset 0x4N is NOT in the
	 * family, so only 0x44/0x46/0x4a/0x4c/0x4d/0x4e/0x4f qualify */
	return !(0x0baf & (1 << (device->chipset & 0x0f)));
}

/* context program generation and default-state fill (nv40 grctx code) */
void nv40_grctx_init(struct nouveau_device *, u32 *size);
void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);

#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
new file mode 100644
index 000000000000..ab3b9dcaf478
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -0,0 +1,888 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/handle.h>
28#include <core/engctx.h>
29#include <core/enum.h>
30
31#include <subdev/fb.h>
32#include <subdev/vm.h>
33#include <subdev/timer.h>
34
35#include <engine/fifo.h>
36#include <engine/graph.h>
37
38#include "nv50.h"
39
/* Engine-private state for nv50-family PGRAPH. */
struct nv50_graph_priv {
	struct nouveau_graph base;
	spinlock_t lock;	/* serialises nv84_graph_tlb_flush() */
	u32 size;		/* context image size, set by nv50_grctx_init() */
};

/* Per-channel PGRAPH context; the underlying gpuobj is the context image. */
struct nv50_graph_chan {
	struct nouveau_graph_chan base;
};
49
50/*******************************************************************************
51 * Graphics object classes
52 ******************************************************************************/
53
54static int
55nv50_graph_object_ctor(struct nouveau_object *parent,
56 struct nouveau_object *engine,
57 struct nouveau_oclass *oclass, void *data, u32 size,
58 struct nouveau_object **pobject)
59{
60 struct nouveau_gpuobj *obj;
61 int ret;
62
63 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
64 16, 16, 0, &obj);
65 *pobject = nv_object(obj);
66 if (ret)
67 return ret;
68
69 nv_wo32(obj, 0x00, nv_mclass(obj));
70 nv_wo32(obj, 0x04, 0x00000000);
71 nv_wo32(obj, 0x08, 0x00000000);
72 nv_wo32(obj, 0x0c, 0x00000000);
73 return 0;
74}
75
/* Object ops shared by every nv50 graphics class: only construction is
 * class-specific, everything else is the generic gpuobj behaviour. */
static struct nouveau_ofuncs
nv50_graph_ofuncs = {
	.ctor = nv50_graph_object_ctor,
	.dtor = _nouveau_gpuobj_dtor,
	.init = _nouveau_gpuobj_init,
	.fini = _nouveau_gpuobj_fini,
	.rd32 = _nouveau_gpuobj_rd32,
	.wr32 = _nouveau_gpuobj_wr32,
};
85
/* Object classes exposed to userspace, selected per chipset by
 * nv50_graph_ctor().  The changing class ids (0x5097/0x8297/0x8397/...)
 * track hardware revisions; per-class roles below are the conventional
 * nouveau naming -- TODO confirm against nouveau class documentation. */
static struct nouveau_oclass
nv50_graph_sclass[] = {
	{ 0x0030, &nv50_graph_ofuncs }, /* null */
	{ 0x502d, &nv50_graph_ofuncs }, /* 2d */
	{ 0x5039, &nv50_graph_ofuncs }, /* m2mf */
	{ 0x5097, &nv50_graph_ofuncs }, /* 3d */
	{ 0x50c0, &nv50_graph_ofuncs }, /* compute */
	{}				/* sentinel */
};

static struct nouveau_oclass
nv84_graph_sclass[] = {
	{ 0x0030, &nv50_graph_ofuncs }, /* null */
	{ 0x502d, &nv50_graph_ofuncs }, /* 2d */
	{ 0x5039, &nv50_graph_ofuncs }, /* m2mf */
	{ 0x50c0, &nv50_graph_ofuncs }, /* compute */
	{ 0x8297, &nv50_graph_ofuncs }, /* 3d */
	{}
};

static struct nouveau_oclass
nva0_graph_sclass[] = {
	{ 0x0030, &nv50_graph_ofuncs }, /* null */
	{ 0x502d, &nv50_graph_ofuncs }, /* 2d */
	{ 0x5039, &nv50_graph_ofuncs }, /* m2mf */
	{ 0x50c0, &nv50_graph_ofuncs }, /* compute */
	{ 0x8397, &nv50_graph_ofuncs }, /* 3d */
	{}
};

static struct nouveau_oclass
nva3_graph_sclass[] = {
	{ 0x0030, &nv50_graph_ofuncs }, /* null */
	{ 0x502d, &nv50_graph_ofuncs }, /* 2d */
	{ 0x5039, &nv50_graph_ofuncs }, /* m2mf */
	{ 0x50c0, &nv50_graph_ofuncs }, /* compute */
	{ 0x8597, &nv50_graph_ofuncs }, /* 3d */
	{ 0x85c0, &nv50_graph_ofuncs }, /* compute */
	{}
};

static struct nouveau_oclass
nvaf_graph_sclass[] = {
	{ 0x0030, &nv50_graph_ofuncs }, /* null */
	{ 0x502d, &nv50_graph_ofuncs }, /* 2d */
	{ 0x5039, &nv50_graph_ofuncs }, /* m2mf */
	{ 0x50c0, &nv50_graph_ofuncs }, /* compute */
	{ 0x85c0, &nv50_graph_ofuncs }, /* compute */
	{ 0x8697, &nv50_graph_ofuncs }, /* 3d */
	{}
};
137
138/*******************************************************************************
139 * PGRAPH context
140 ******************************************************************************/
141
/* Allocate a per-channel context image and pre-fill it with the default
 * register state.  priv->size was computed by nv50_grctx_init() during
 * engine init. */
static int
nv50_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv50_graph_priv *priv = (void *)engine;
	struct nv50_graph_chan *chan;
	int ret;

	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
					   priv->size, 0,
					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* write default context values into the freshly zeroed image */
	nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
	return 0;
}
162
/* Per-channel context class; only construction is nv50-specific. */
static struct nouveau_oclass
nv50_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = _nouveau_graph_context_init,
		.fini = _nouveau_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
175
176/*******************************************************************************
177 * PGRAPH engine/subdev functions
178 ******************************************************************************/
179
/* Simple TLB flush used on nv50/nvac (see nv50_graph_ctor()): flushing
 * the VM engine is sufficient, no idle-wait needed. */
static int
nv50_graph_tlb_flush(struct nouveau_engine *engine)
{
	nv50_vm_flush_engine(&engine->base, 0x00);
	return 0;
}
186
187static int
188nv84_graph_tlb_flush(struct nouveau_engine *engine)
189{
190 struct nouveau_timer *ptimer = nouveau_timer(engine);
191 struct nv50_graph_priv *priv = (void *)engine;
192 bool idle, timeout = false;
193 unsigned long flags;
194 u64 start;
195 u32 tmp;
196
197 spin_lock_irqsave(&priv->lock, flags);
198 nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
199
200 start = ptimer->read(ptimer);
201 do {
202 idle = true;
203
204 for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
205 if ((tmp & 7) == 1)
206 idle = false;
207 }
208
209 for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
210 if ((tmp & 7) == 1)
211 idle = false;
212 }
213
214 for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
215 if ((tmp & 7) == 1)
216 idle = false;
217 }
218 } while (!idle &&
219 !(timeout = ptimer->read(ptimer) - start > 2000000000));
220
221 if (timeout) {
222 nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
223 "0x%08x 0x%08x 0x%08x 0x%08x\n",
224 nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
225 nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
226 }
227
228 nv50_vm_flush_engine(&engine->base, 0x00);
229
230 nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
231 spin_unlock_irqrestore(&priv->lock, flags);
232 return timeout ? -EBUSY : 0;
233}
234
/* MP execution fault codes, decoded by nv50_priv_mp_trap(). */
static const struct nouveau_enum nv50_mp_exec_error_names[] = {
	{ 3, "STACK_UNDERFLOW", NULL },
	{ 4, "QUADON_ACTIVE", NULL },
	{ 8, "TIMEOUT", NULL },
	{ 0x10, "INVALID_OPCODE", NULL },
	{ 0x40, "BREAKPOINT", NULL },
	{}
};

/* per-unit trap status bitfields, printed by nv50_graph_trap_handler() */
static const struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

static const struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nouveau_bitfield nv50_graph_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nouveau_bitfield nv50_graph_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};
265
/* There must be a *lot* of these. Will take some time to gather them up.
 * DATA_ERROR codes read from 0x400110 by nv50_graph_intr(); non-static
 * so other engines can reuse the table. */
const struct nouveau_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_OPERATION", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};

/* names for the PGRAPH interrupt status bits in 0x400100 */
static const struct nouveau_bitfield nv50_graph_intr_name[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};
326
/* Report and acknowledge MP (shader core) execution traps for TP @tpid.
 * Diagnostics are only printed when @display is non-zero; the trap state
 * is cleared either way. */
static void
nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
{
	u32 units = nv_rd32(priv, 0x1540);	/* MP-present bits at 24..27 */
	u32 addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		/* per-MP register window; the layout moved at nva0 */
		if (nv_device(priv)->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(priv, addr + 0x10);
		status = nv_rd32(priv, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			/* return value discarded -- read presumably latches
			 * the fault info read below; TODO confirm */
			nv_rd32(priv, addr + 0x20);
			pc = nv_rd32(priv, addr + 0x24);
			oplow = nv_rd32(priv, addr + 0x70);
			ophigh = nv_rd32(priv, addr + 0x74);
			nv_error(priv, "TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_enum_print(nv50_mp_exec_error_names, status);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		/* ack the trap: write back 0x10, clear the status word */
		nv_wr32(priv, addr + 0x10, mp10);
		nv_wr32(priv, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		nv_error(priv, "TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
365
/* Decode, report and acknowledge per-TP traps of one class.
 * @type:        trap class as dispatched by nv50_graph_trap_handler()
 *               (6 = texture, 7 = MP, 8 = TPDMA)
 * @ustatus_old: per-TP status register base for pre-nva0 chips
 * @ustatus_new: same for nva0+ (different stride/offset)
 * @display:     print diagnostics when non-zero
 * @name:        label used in log messages */
static void
nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
		  u32 ustatus_new, int display, const char *name)
{
	int tps = 0;
	u32 units = nv_rd32(priv, 0x1540);	/* TP-present mask */
	int i, r;
	u32 ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		/* per-TP register stride differs pre/post nva0 */
		if (nv_device(priv)->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				nv_error(priv, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					nv_error(priv, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(priv, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x04030000) {
				nv50_priv_mp_trap(priv, i, display);
				ustatus &= ~0x04030000;
			}
			break;
		case 8: /* TPDMA error */
			{
			u32 e0c = nv_rd32(priv, ustatus_addr + 4);
			u32 e10 = nv_rd32(priv, ustatus_addr + 8);
			u32 e14 = nv_rd32(priv, ustatus_addr + 0xc);
			u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
			u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
			u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
			u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		/* reset the trap source regardless of what we decoded */
		nv_wr32(priv, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		nv_info(priv, "%s - No TPs claiming errors?\n", name);
}
464
/* Decode the PGRAPH TRAP status word (0x400108), print diagnostics when
 * @display is non-zero, and acknowledge every source handled.  Returns 0
 * only when DISPATCH was the sole trap source (so the caller may treat
 * the TRAP interrupt as fully handled), 1 otherwise. */
static int
nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
		int chid, u64 inst)
{
	u32 status = nv_rd32(priv, 0x400108);
	u32 ustatus;

	if (!status && display) {
		nv_error(priv, "TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
		}

		/* stop command fetching while we poke at the stuck state */
		nv_wr32(priv, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nv_rd32(priv, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nv_rd32(priv, 0x40080c);
			u32 datah = nv_rd32(priv, 0x400810);
			u32 class = nv_rd32(priv, 0x400814);
			u32 r848 = nv_rd32(priv, 0x400848);

			nv_error(priv, "TRAP DISPATCH_FAULT\n");
			/* bit 31 of 0x400808 marks a valid stuck command */
			if (display && (addr & 0x80000000)) {
				nv_error(priv, "ch %d [0x%010llx] "
					     "subc %d class 0x%04x mthd 0x%04x "
					     "data 0x%08x%08x "
					     "400808 0x%08x 400848 0x%08x\n",
					chid, inst, subc, class, mthd, datah,
					datal, addr, r848);
			} else
			if (display) {
				nv_error(priv, "no stuck command?\n");
			}

			nv_wr32(priv, 0x400808, 0);
			nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
			nv_wr32(priv, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nv_rd32(priv, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nv_rd32(priv, 0x40085c);
			u32 class = nv_rd32(priv, 0x400814);

			nv_error(priv, "TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				nv_error(priv, "ch %d [0x%010llx] "
					     "subc %d class 0x%04x mthd 0x%04x "
					     "data 0x%08x 40084c 0x%08x\n",
					chid, inst, subc, class, mthd,
					data, addr);
			} else
			if (display) {
				nv_error(priv, "no stuck command?\n");
			}

			nv_wr32(priv, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			nv_error(priv, "TRAP_DISPATCH (unknown "
				      "0x%08x)\n", ustatus);
		}

		/* reset and ack the DISPATCH trap source */
		nv_wr32(priv, 0x400804, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_M2MF");
			nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
			printk("\n");
			nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
				nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
				nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));

		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(priv, 0x400040, 2);
		nv_wr32(priv, 0x400040, 0);
		nv_wr32(priv, 0x406800, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_VFETCH");
			nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
			printk("\n");
			nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
				nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
				nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
		}

		nv_wr32(priv, 0x400c04, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_STRMOUT");
			nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
			printk("\n");
			nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
				nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
				nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));

		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(priv, 0x400040, 0x80);
		nv_wr32(priv, 0x400040, 0);
		nv_wr32(priv, 0x401800, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_CCACHE");
			nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
			printk("\n");
			nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
				     " %08x %08x %08x\n",
				nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
				nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
				nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
				nv_rd32(priv, 0x40501c));

		}

		nv_wr32(priv, 0x405018, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
		if (display)
			nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
		nv_wr32(priv, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
				    "TRAP_TEXTURE");
		nv_wr32(priv, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
				    "TRAP_MP");
		nv_wr32(priv, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA:  Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
				    "TRAP_TPDMA");
		nv_wr32(priv, 0x400108, 0x100);
		status &= ~0x100;
	}

	/* anything left over is unrecognised -- report and ack it anyway */
	if (status) {
		if (display)
			nv_error(priv, "TRAP: unknown 0x%08x\n", status);
		nv_wr32(priv, 0x400108, status);
	}

	return 1;
}
676
/* Top-level PGRAPH interrupt handler for nv50-family chips. */
static void
nv50_graph_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_object *engctx;
	struct nouveau_handle *handle = NULL;
	struct nv50_graph_priv *priv = (void *)subdev;
	u32 stat = nv_rd32(priv, 0x400100);
	u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
	u32 addr = nv_rd32(priv, 0x400704);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nv_rd32(priv, 0x400708);
	u32 class = nv_rd32(priv, 0x400814);
	u32 show = stat;	/* bits still unexplained after handling */
	int chid;

	/* resolve the active channel from the context instance address */
	engctx = nouveau_engctx_get(engine, inst);
	chid = pfifo->chid(pfifo, engctx);

	/* ILLEGAL_MTHD: give a software class handler a chance first */
	if (stat & 0x00000010) {
		handle = nouveau_handle_get_class(engctx, class);
		if (handle && !nv_call(handle->object, mthd, data))
			show &= ~0x00000010;
		nouveau_handle_put(handle);
	}

	/* DATA_ERROR: decode the error code from 0x400110 */
	if (show & 0x00100000) {
		u32 ecode = nv_rd32(priv, 0x400110);
		nv_error(priv, "DATA_ERROR ");
		nouveau_enum_print(nv50_data_error_names, ecode);
		printk("\n");
	}

	/* TRAP: hand off to the per-unit trap decoder */
	if (stat & 0x00200000) {
		if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12))
			show &= ~0x00200000;
	}

	/* ack everything we saw and re-enable command fetching */
	nv_wr32(priv, 0x400100, stat);
	nv_wr32(priv, 0x400500, 0x00010001);

	/* anything left in 'show' was not handled above */
	if (show) {
		nv_info(priv, "");
		nouveau_bitfield_print(nv50_graph_intr_name, show);
		printk("\n");
		nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
			       "mthd 0x%04x data 0x%08x\n",
			 chid, (u64)inst << 12, subc, class, mthd, data);
		nv50_fb_trap(nouveau_fb(priv), 1);
	}

	/* clear bit 31 of 0x400824 if set -- purpose not documented here */
	if (nv_rd32(priv, 0x400824) & (1 << 31))
		nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));

	nouveau_engctx_put(engctx);
}
735
736static int
737nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
738 struct nouveau_oclass *oclass, void *data, u32 size,
739 struct nouveau_object **pobject)
740{
741 struct nv50_graph_priv *priv;
742 int ret;
743
744 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
745 *pobject = nv_object(priv);
746 if (ret)
747 return ret;
748
749 nv_subdev(priv)->unit = 0x00201000;
750 nv_subdev(priv)->intr = nv50_graph_intr;
751 nv_engine(priv)->cclass = &nv50_graph_cclass;
752
753 switch (nv_device(priv)->chipset) {
754 case 0x50:
755 nv_engine(priv)->sclass = nv50_graph_sclass;
756 break;
757 case 0x84:
758 case 0x86:
759 case 0x92:
760 case 0x94:
761 case 0x96:
762 case 0x98:
763 nv_engine(priv)->sclass = nv84_graph_sclass;
764 break;
765 case 0xa0:
766 case 0xaa:
767 case 0xac:
768 nv_engine(priv)->sclass = nva0_graph_sclass;
769 break;
770 case 0xa3:
771 case 0xa5:
772 case 0xa8:
773 nv_engine(priv)->sclass = nva3_graph_sclass;
774 break;
775 case 0xaf:
776 nv_engine(priv)->sclass = nvaf_graph_sclass;
777 break;
778
779 };
780
781 if (nv_device(priv)->chipset == 0x50 ||
782 nv_device(priv)->chipset == 0xac)
783 nv_engine(priv)->tlb_flush = nv50_graph_tlb_flush;
784 else
785 nv_engine(priv)->tlb_flush = nv84_graph_tlb_flush;
786
787 spin_lock_init(&priv->lock);
788 return 0;
789}
790
/* Bring up nv50-family PGRAPH: enable traps/interrupts, upload the
 * context program and reset the zcull regions. */
static int
nv50_graph_init(struct nouveau_object *object)
{
	struct nv50_graph_priv *priv = (void *)object;
	int ret, units, i;

	ret = nouveau_graph_init(&priv->base);
	if (ret)
		return ret;

	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
	nv_wr32(priv, 0x40008c, 0x00000004);

	/* reset/enable traps and interrupts */
	nv_wr32(priv, 0x400804, 0xc0000000);	/* DISPATCH */
	nv_wr32(priv, 0x406800, 0xc0000000);	/* M2MF */
	nv_wr32(priv, 0x400c04, 0xc0000000);	/* VFETCH */
	nv_wr32(priv, 0x401800, 0xc0000000);	/* STRMOUT */
	nv_wr32(priv, 0x405018, 0xc0000000);	/* CCACHE */
	nv_wr32(priv, 0x402000, 0xc0000000);	/* unknown trap source */

	/* same for the per-TP trap sources; the register layout (and
	 * stride) differs between pre-nva0 and nva0+ chips */
	units = nv_rd32(priv, 0x001540);
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		if (nv_device(priv)->chipset < 0xa0) {
			nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
			nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
			nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
		} else {
			nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
			nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
			nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
		}
	}

	/* ack and unmask all interrupt/trap status bits, enable fetch */
	nv_wr32(priv, 0x400108, 0xffffffff);
	nv_wr32(priv, 0x400138, 0xffffffff);
	nv_wr32(priv, 0x400100, 0xffffffff);
	nv_wr32(priv, 0x40013c, 0xffffffff);
	nv_wr32(priv, 0x400500, 0x00010001);

	/* upload context program, initialise ctxctl defaults */
	ret = nv50_grctx_init(nv_device(priv), &priv->size);
	if (ret)
		return ret;

	nv_wr32(priv, 0x400824, 0x00000000);
	nv_wr32(priv, 0x400828, 0x00000000);
	nv_wr32(priv, 0x40082c, 0x00000000);
	nv_wr32(priv, 0x400830, 0x00000000);
	nv_wr32(priv, 0x400724, 0x00000000);
	nv_wr32(priv, 0x40032c, 0x00000000);
	nv_wr32(priv, 0x400320, 4);	/* CTXCTL_CMD = NEWCTXDMA */

	/* some unknown zcull magic */
	switch (nv_device(priv)->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nv_wr32(priv, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		nv_wr32(priv, 0x402cc0, 0x00000000);
		if (nv_device(priv)->chipset == 0xa0 ||
		    nv_device(priv)->chipset == 0xaa ||
		    nv_device(priv)->chipset == 0xac) {
			nv_wr32(priv, 0x402ca8, 0x00000802);
		} else {
			/* NOTE(review): 0x402cc0 was already zeroed above;
			 * this second write looks redundant -- confirm */
			nv_wr32(priv, 0x402cc0, 0x00000000);
			nv_wr32(priv, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000);
		nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000);
		nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000);
		nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000);
	}
	return 0;
}
878
/* Engine class description registered for nv50-family PGRAPH; teardown
 * and fini use the generic graph helpers. */
struct nouveau_oclass
nv50_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_graph_ctor,
		.dtor = _nouveau_graph_dtor,
		.init = nv50_graph_init,
		.fini = _nouveau_graph_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
new file mode 100644
index 000000000000..0505fb419bde
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
@@ -0,0 +1,7 @@
#ifndef __NV50_GRAPH_H__
#define __NV50_GRAPH_H__

/* Build the default NV50 PGRAPH context program; returns the required
 * context size via *size. */
int  nv50_grctx_init(struct nouveau_device *, u32 *size);
/* Fill a freshly-allocated gpuobj with the default context values. */
void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);

#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
new file mode 100644
index 000000000000..c62f2d0f5f0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -0,0 +1,955 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26#include "fuc/hubnvc0.fuc.h"
27#include "fuc/gpcnvc0.fuc.h"
28
29/*******************************************************************************
30 * Graphics object classes
31 ******************************************************************************/
32
/* Object classes exposed to userspace on base GF100 (nvc0) chips. */
static struct nouveau_oclass
nvc0_graph_sclass[] = {
	{ 0x902d, &nouveau_object_ofuncs },
	{ 0x9039, &nouveau_object_ofuncs },
	{ 0x9097, &nouveau_object_ofuncs },
	{ 0x90c0, &nouveau_object_ofuncs },
	{}
};
41
/* As nvc0_graph_sclass, plus the 0x9197 class (see nvc0_graph_ctor switch). */
static struct nouveau_oclass
nvc1_graph_sclass[] = {
	{ 0x902d, &nouveau_object_ofuncs },
	{ 0x9039, &nouveau_object_ofuncs },
	{ 0x9097, &nouveau_object_ofuncs },
	{ 0x90c0, &nouveau_object_ofuncs },
	{ 0x9197, &nouveau_object_ofuncs },
	{}
};
51
/* As nvc1_graph_sclass, plus the 0x9297 class (see nvc0_graph_ctor switch). */
static struct nouveau_oclass
nvc8_graph_sclass[] = {
	{ 0x902d, &nouveau_object_ofuncs },
	{ 0x9039, &nouveau_object_ofuncs },
	{ 0x9097, &nouveau_object_ofuncs },
	{ 0x90c0, &nouveau_object_ofuncs },
	{ 0x9197, &nouveau_object_ofuncs },
	{ 0x9297, &nouveau_object_ofuncs },
	{}
};
62
63/*******************************************************************************
64 * PGRAPH context
65 ******************************************************************************/
66
/* Construct a per-channel PGRAPH context.  Allocates the context image
 * (priv->size bytes, filled from the golden image in priv->data), an
 * "mmio list" buffer consumed by the HUB falcon on first context load,
 * and any extra buffers referenced by that list.  Returns 0 or a
 * negative errno; partially-constructed state is torn down by the dtor. */
int
nvc0_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *args, u32 size,
			struct nouveau_object **pobject)
{
	struct nouveau_vm *vm = nouveau_client(parent)->vm;
	struct nvc0_graph_priv *priv = (void *)engine;
	struct nvc0_graph_data *data = priv->mmio_data;
	struct nvc0_graph_mmio *mmio = priv->mmio_list;
	struct nvc0_graph_chan *chan;
	int ret, i;

	/* allocate memory for context, and fill with default values */
	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
					   priv->size, 0x100,
					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* allocate memory for a "mmio list" buffer that's used by the HUB
	 * fuc to modify some per-context register settings on first load
	 * of the context.
	 */
	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100, 0, &chan->mmio);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
				    NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
				    &chan->mmio_vma);
	if (ret)
		return ret;

	/* allocate buffers referenced by mmio list */
	for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
		ret = nouveau_gpuobj_new(parent, NULL, data->size, data->align,
					 0, &chan->data[i].mem);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
					    &chan->data[i].vma);
		if (ret)
			return ret;

		data++;
	}

	/* finally, fill in the mmio list and point the context at it */
	for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
		u32 addr = mmio->addr;
		u32 data = mmio->data;

		/* patch the virtual address of the referenced buffer into
		 * the register value before it's written */
		if (mmio->shift) {
			u64 info = chan->data[mmio->buffer].vma.offset;
			data |= info >> mmio->shift;
		}

		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
		mmio++;
	}

	/* copy the golden context image into this channel's context */
	for (i = 0; i < priv->size; i += 4)
		nv_wo32(chan, i, priv->data[i / 4]);

	/* the context header layout differs between the internal ucode and
	 * external (NVIDIA-supplied) firmware */
	if (!priv->firmware) {
		nv_wo32(chan, 0x00, chan->mmio_nr / 2);
		nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
	} else {
		nv_wo32(chan, 0xf4, 0);
		nv_wo32(chan, 0xf8, 0);
		nv_wo32(chan, 0x10, chan->mmio_nr / 2);
		nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
		nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
		nv_wo32(chan, 0x1c, 1);
		nv_wo32(chan, 0x20, 0);
		nv_wo32(chan, 0x28, 0);
		nv_wo32(chan, 0x2c, 0);
	}

	return 0;
}
152
153void
154nvc0_graph_context_dtor(struct nouveau_object *object)
155{
156 struct nvc0_graph_chan *chan = (void *)object;
157 int i;
158
159 for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
160 nouveau_gpuobj_unmap(&chan->data[i].vma);
161 nouveau_gpuobj_ref(NULL, &chan->data[i].mem);
162 }
163
164 nouveau_gpuobj_unmap(&chan->mmio_vma);
165 nouveau_gpuobj_ref(NULL, &chan->mmio);
166
167 nouveau_graph_context_destroy(&chan->base);
168}
169
/* Context-class descriptor: per-channel context lifecycle hooks, using the
 * generic graph-context init/fini/rd32/wr32 helpers. */
static struct nouveau_oclass
nvc0_graph_cclass = {
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_graph_context_ctor,
		.dtor = nvc0_graph_context_dtor,
		.init = _nouveau_graph_context_init,
		.fini = _nouveau_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
181
182/*******************************************************************************
183 * PGRAPH engine/subdev functions
184 ******************************************************************************/
185
/* Dump the done/status registers of a single CTXCTL falcon (HUB or GPC)
 * located at 'base' to aid debugging of context-switch failures. */
static void
nvc0_graph_ctxctl_debug_unit(struct nvc0_graph_priv *priv, u32 base)
{
	nv_error(priv, "%06x - done 0x%08x\n", base,
		 nv_rd32(priv, base + 0x400));
	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		 nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
		 nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		 nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
		 nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
}
198
199void
200nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
201{
202 u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
203 u32 gpc;
204
205 nvc0_graph_ctxctl_debug_unit(priv, 0x409000);
206 for (gpc = 0; gpc < gpcnr; gpc++)
207 nvc0_graph_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
208}
209
/* Service a CTXCTL interrupt: decode the known ucode-error and watchdog
 * bits, dump falcon state, then acknowledge everything that was raised. */
static void
nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
{
	u32 ustat = nv_rd32(priv, 0x409c18);

	if (ustat & 0x00000001)
		nv_error(priv, "CTXCTRL ucode error\n");
	if (ustat & 0x00080000)
		nv_error(priv, "CTXCTRL watchdog timeout\n");
	if (ustat & ~0x00080001)
		nv_error(priv, "CTXCTRL 0x%08x\n", ustat);

	nvc0_graph_ctxctl_debug(priv);
	/* ack all raised bits */
	nv_wr32(priv, 0x409c20, ustat);
}
225
/* Handle and acknowledge the trap sources of one TPC (TEX, MP, POLY, L1C).
 * Each handled source has its unit status reset and its bit cleared in the
 * TPC trap register (0x0508); leftovers are reported as unknown. */
static void
nvc0_graph_trap_tpc(struct nvc0_graph_priv *priv, int gpc, int tpc)
{
	u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508));

	if (stat & 0x00000001) {
		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224));
		nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000002) {
		u32 trap0 = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0644));
		u32 trap1 = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x064c));
		nv_error(priv, "GPC%d/TPC%d/MP: 0x%08x 0x%08x\n",
			 gpc, tpc, trap0, trap1);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0644), 0x001ffffe);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x064c), 0x0000000f);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000002);
		stat &= ~0x00000002;
	}

	if (stat & 0x00000004) {
		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084));
		nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000004);
		stat &= ~0x00000004;
	}

	if (stat & 0x00000008) {
		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c));
		nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000008);
		stat &= ~0x00000008;
	}

	if (stat) {
		nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat);
		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), stat);
	}
}
271
272static void
273nvc0_graph_trap_gpc(struct nvc0_graph_priv *priv, int gpc)
274{
275 u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
276 int tpc;
277
278 if (stat & 0x00000001) {
279 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
280 nv_error(priv, "GPC%d/PROP: 0x%08x\n", gpc, trap);
281 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
282 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000001);
283 stat &= ~0x00000001;
284 }
285
286 if (stat & 0x00000002) {
287 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
288 nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap);
289 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
290 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000002);
291 stat &= ~0x00000002;
292 }
293
294 if (stat & 0x00000004) {
295 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
296 nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap);
297 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
298 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000004);
299 stat &= ~0x00000004;
300 }
301
302 if (stat & 0x00000008) {
303 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
304 nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap);
305 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
306 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000008);
307 stat &= ~0x00000009;
308 }
309
310 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
311 u32 mask = 0x00010000 << tpc;
312 if (stat & mask) {
313 nvc0_graph_trap_tpc(priv, gpc, tpc);
314 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask);
315 stat &= ~mask;
316 }
317 }
318
319 if (stat) {
320 nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat);
321 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), stat);
322 }
323}
324
/* Top-level PGRAPH trap handler: decodes the global trap register
 * (0x400108), services each known source (resetting the unit's status
 * register and acknowledging the trap bit), and dispatches GPC/ROP traps
 * to their per-unit handlers.  Unhandled bits are logged and acked. */
static void
nvc0_graph_trap_intr(struct nvc0_graph_priv *priv)
{
	u32 trap = nv_rd32(priv, 0x400108);
	int rop, gpc;

	if (trap & 0x00000001) {
		u32 stat = nv_rd32(priv, 0x404000);
		nv_error(priv, "DISPATCH 0x%08x\n", stat);
		nv_wr32(priv, 0x404000, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000001);
		trap &= ~0x00000001;
	}

	if (trap & 0x00000002) {
		u32 stat = nv_rd32(priv, 0x404600);
		nv_error(priv, "M2MF 0x%08x\n", stat);
		nv_wr32(priv, 0x404600, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000002);
		trap &= ~0x00000002;
	}

	if (trap & 0x00000008) {
		u32 stat = nv_rd32(priv, 0x408030);
		nv_error(priv, "CCACHE 0x%08x\n", stat);
		nv_wr32(priv, 0x408030, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000008);
		trap &= ~0x00000008;
	}

	if (trap & 0x00000010) {
		u32 stat = nv_rd32(priv, 0x405840);
		nv_error(priv, "SHADER 0x%08x\n", stat);
		nv_wr32(priv, 0x405840, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000010);
		trap &= ~0x00000010;
	}

	if (trap & 0x00000040) {
		u32 stat = nv_rd32(priv, 0x40601c);
		nv_error(priv, "UNK6 0x%08x\n", stat);
		nv_wr32(priv, 0x40601c, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000040);
		trap &= ~0x00000040;
	}

	if (trap & 0x00000080) {
		u32 stat = nv_rd32(priv, 0x404490);
		nv_error(priv, "MACRO 0x%08x\n", stat);
		nv_wr32(priv, 0x404490, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000080);
		trap &= ~0x00000080;
	}

	/* per-GPC traps: 0x400118 has one bit per GPC */
	if (trap & 0x01000000) {
		u32 stat = nv_rd32(priv, 0x400118);
		for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) {
			u32 mask = 0x00000001 << gpc;
			if (stat & mask) {
				nvc0_graph_trap_gpc(priv, gpc);
				nv_wr32(priv, 0x400118, mask);
				stat &= ~mask;
			}
		}
		nv_wr32(priv, 0x400108, 0x01000000);
		trap &= ~0x01000000;
	}

	/* ROP traps: dump and reset both status registers of every unit */
	if (trap & 0x02000000) {
		for (rop = 0; rop < priv->rop_nr; rop++) {
			u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
			u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
			nv_error(priv, "ROP%d 0x%08x 0x%08x\n",
				 rop, statz, statc);
			nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
			nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
		}
		nv_wr32(priv, 0x400108, 0x02000000);
		trap &= ~0x02000000;
	}

	if (trap) {
		nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap);
		nv_wr32(priv, 0x400108, trap);
	}
}
411
/* PGRAPH interrupt handler.  Resolves the active channel from the current
 * instance pointer, then services illegal-method/illegal-class/data-error
 * notifications, traps, and CTXCTL interrupts, acknowledging each source
 * in 0x400100 as it goes. */
static void
nvc0_graph_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_object *engctx;
	struct nouveau_handle *handle;
	struct nvc0_graph_priv *priv = (void *)subdev;
	u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
	u32 stat = nv_rd32(priv, 0x400100);
	u32 addr = nv_rd32(priv, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nv_rd32(priv, 0x400708);
	u32 code = nv_rd32(priv, 0x400110);
	u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
	int chid;

	/* look up the engine context / channel id for error reporting */
	engctx = nouveau_engctx_get(engine, inst);
	chid   = pfifo->chid(pfifo, engctx);

	if (stat & 0x00000010) {
		/* give software-object handlers first crack at the method */
		handle = nouveau_handle_get_class(engctx, class);
		if (!handle || nv_call(handle->object, mthd, data)) {
			nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
				     "subc %d class 0x%04x mthd 0x%04x "
				     "data 0x%08x\n",
				 chid, inst << 12, subc, class, mthd, data);
		}
		nouveau_handle_put(handle);
		nv_wr32(priv, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000020) {
		nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
			 chid, inst << 12, subc, class, mthd, data);
		nv_wr32(priv, 0x400100, 0x00000020);
		stat &= ~0x00000020;
	}

	if (stat & 0x00100000) {
		/* the enum print + printk continue a single log line */
		nv_error(priv, "DATA_ERROR [");
		nouveau_enum_print(nv50_data_error_names, code);
		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
		       "mthd 0x%04x data 0x%08x\n",
		       chid, inst << 12, subc, class, mthd, data);
		nv_wr32(priv, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		nv_error(priv, "TRAP ch %d [0x%010llx]\n", chid, inst << 12);
		nvc0_graph_trap_intr(priv);
		nv_wr32(priv, 0x400100, 0x00200000);
		stat &= ~0x00200000;
	}

	if (stat & 0x00080000) {
		nvc0_graph_ctxctl_isr(priv);
		nv_wr32(priv, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		nv_error(priv, "unknown stat 0x%08x\n", stat);
		nv_wr32(priv, 0x400100, stat);
	}

	/* re-enable fifo access */
	nv_wr32(priv, 0x400500, 0x00010001);
	nouveau_engctx_put(engctx);
}
485
/* Load an external falcon firmware image named "nouveau/nv%02x_<fwname>",
 * falling back to "nouveau/<fwname>" if the chipset-specific file is not
 * found.  The firmware contents are copied into fuc->data (kfree'd by
 * nvc0_graph_dtor_fw).  Returns 0, a request_firmware error, or -ENOMEM. */
int
nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname,
		   struct nvc0_graph_fuc *fuc)
{
	struct nouveau_device *device = nv_device(priv);
	const struct firmware *fw;
	char f[32];
	int ret;

	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
	ret = request_firmware(&fw, f, &device->pdev->dev);
	if (ret) {
		/* chipset-specific file missing; try the generic name */
		snprintf(f, sizeof(f), "nouveau/%s", fwname);
		ret = request_firmware(&fw, f, &device->pdev->dev);
		if (ret) {
			nv_error(priv, "failed to load %s\n", fwname);
			return ret;
		}
	}

	fuc->size = fw->size;
	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
	release_firmware(fw);
	return (fuc->data != NULL) ? 0 : -ENOMEM;
}
511
/* Construct the GF100-family PGRAPH engine object: registers the interrupt
 * handler and context class, optionally loads external falcon firmware,
 * selects the object-class list for the chipset, allocates the two
 * 0x4188b4/0x4188b8 scratch objects, and probes the GPC/TPC/ROP topology. */
static int
nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nvc0_graph_priv *priv;
	bool enable = true;
	int ret, i;

	switch (device->chipset) {
	case 0xd9: /* known broken without binary driver firmware */
		enable = false;
		break;
	default:
		break;
	}

	ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x18001000;
	nv_subdev(priv)->intr = nvc0_graph_intr;
	nv_engine(priv)->cclass = &nvc0_graph_cclass;

	/* optionally use NVIDIA-supplied firmware instead of the built-in
	 * context-control ucode */
	if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
		nv_info(priv, "using external firmware\n");
		if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
		    nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
		    nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
		    nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
			return -EINVAL;
		priv->firmware = true;
	}

	/* pick the object-class list matching the chipset's 3D class */
	switch (nvc0_graph_class(priv)) {
	case 0x9097:
		nv_engine(priv)->sclass = nvc0_graph_sclass;
		break;
	case 0x9197:
		nv_engine(priv)->sclass = nvc1_graph_sclass;
		break;
	case 0x9297:
		nv_engine(priv)->sclass = nvc8_graph_sclass;
		break;
	}

	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
	if (ret)
		return ret;

	/* fill both scratch objects with the pattern expected by the hw
	 * (purpose not fully understood, hence the unk names) */
	for (i = 0; i < 0x1000; i += 4) {
		nv_wo32(priv->unk4188b4, i, 0x00000010);
		nv_wo32(priv->unk4188b8, i, 0x00000010);
	}

	/* read unit topology from hardware */
	priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
	priv->gpc_nr =  nv_rd32(priv, 0x409604) & 0x0000001f;
	for (i = 0; i < priv->gpc_nr; i++) {
		priv->tpc_nr[i]  = nv_rd32(priv, GPC_UNIT(i, 0x2608));
		priv->tpc_total += priv->tpc_nr[i];
	}

	/*XXX: these need figuring out... though it might not even matter */
	switch (nv_device(priv)->chipset) {
	case 0xc0:
		if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
			priv->magic_not_rop_nr = 0x07;
		} else
		if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
			priv->magic_not_rop_nr = 0x05;
		} else
		if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
			priv->magic_not_rop_nr = 0x06;
		}
		break;
	case 0xc3: /* 450, 4/0/0/0, 2 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xc4: /* 460, 3/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x01;
		break;
	case 0xc1: /* 2/0/0/0, 1 */
		priv->magic_not_rop_nr = 0x01;
		break;
	case 0xc8: /* 4/4/3/4, 5 */
		priv->magic_not_rop_nr = 0x06;
		break;
	case 0xce: /* 4/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xcf: /* 4/0/0/0, 3 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xd9: /* 1/0/0/0, 1 */
		priv->magic_not_rop_nr = 0x01;
		break;
	}

	return 0;
}
619
620static void
621nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
622{
623 if (fuc->data) {
624 kfree(fuc->data);
625 fuc->data = NULL;
626 }
627}
628
629void
630nvc0_graph_dtor(struct nouveau_object *object)
631{
632 struct nvc0_graph_priv *priv = (void *)object;
633
634 if (priv->data)
635 kfree(priv->data);
636
637 nvc0_graph_dtor_fw(&priv->fuc409c);
638 nvc0_graph_dtor_fw(&priv->fuc409d);
639 nvc0_graph_dtor_fw(&priv->fuc41ac);
640 nvc0_graph_dtor_fw(&priv->fuc41ad);
641
642 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
643 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
644
645 nouveau_graph_destroy(&priv->base);
646}
647
648static void
649nvc0_graph_init_obj418880(struct nvc0_graph_priv *priv)
650{
651 int i;
652
653 nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
654 nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
655 for (i = 0; i < 4; i++)
656 nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
657 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
658 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
659}
660
/* Program global PGRAPH control registers to their initial values.
 * The values are magic numbers taken from hardware bring-up; their
 * individual meanings are not documented here. */
static void
nvc0_graph_init_regs(struct nvc0_graph_priv *priv)
{
	nv_wr32(priv, 0x400080, 0x003083c2);
	nv_wr32(priv, 0x400088, 0x00006fe7);
	nv_wr32(priv, 0x40008c, 0x00000000);
	nv_wr32(priv, 0x400090, 0x00000030);
	nv_wr32(priv, 0x40013c, 0x013901f7);
	nv_wr32(priv, 0x400140, 0x00000100);
	nv_wr32(priv, 0x400144, 0x00000000);
	nv_wr32(priv, 0x400148, 0x00000110);
	nv_wr32(priv, 0x400138, 0x00000000);
	nv_wr32(priv, 0x400130, 0x00000000);
	nv_wr32(priv, 0x400134, 0x00000000);
	nv_wr32(priv, 0x400124, 0x00000002);
}
677
/* First-stage GPC setup: distribute TPC indices round-robin across GPCs
 * into the broadcast 0x0980..0x098c registers, and program per-GPC unit
 * counts and the magicgpc918 screen-division constant. */
static void
nvc0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
{
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
	u32 data[TPC_MAX / 8];
	u8  tpcnr[GPC_MAX];
	int i, gpc, tpc;

	nv_wr32(priv, TPC_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */

	/*
	 *      TP      ROP UNKVAL(magic_not_rop_nr)
	 * 450: 4/0/0/0 2        3
	 * 460: 3/4/0/0 4        1
	 * 465: 3/4/4/0 4        7
	 * 470: 3/3/4/4 5        5
	 * 480: 3/4/4/4 6        6
	 */

	/* assign each global TPC slot to the next GPC that still has a
	 * TPC left, packing 4-bit TPC indices eight to a word */
	memset(data, 0x00, sizeof(data));
	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % priv->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
			priv->tpc_nr[gpc]);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
}
723
/* Enable error reporting in the fixed-function units (the 0xc0000000
 * pattern matches the reset values written by the trap handlers above). */
static void
nvc0_graph_init_units(struct nvc0_graph_priv *priv)
{
	nv_wr32(priv, 0x409c24, 0x000f0000);
	nv_wr32(priv, 0x404000, 0xc0000000); /* DISPATCH */
	nv_wr32(priv, 0x404600, 0xc0000000); /* M2MF */
	nv_wr32(priv, 0x408030, 0xc0000000);
	nv_wr32(priv, 0x40601c, 0xc0000000);
	nv_wr32(priv, 0x404490, 0xc0000000); /* MACRO */
	nv_wr32(priv, 0x406018, 0xc0000000);
	nv_wr32(priv, 0x405840, 0xc0000000);
	nv_wr32(priv, 0x405844, 0x00ffffff);
	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
}
739
/* Second-stage GPC setup: arm per-GPC and per-TPC error/trap reporting
 * (same registers the trap handlers read and reset) and unmask all trap
 * sources in each GPC's 0x2c90/0x2c94 registers. */
static void
nvc0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
{
	int gpc, tpc;

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
		}
		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}
}
763
764static void
765nvc0_graph_init_rop(struct nvc0_graph_priv *priv)
766{
767 int rop;
768
769 for (rop = 0; rop < priv->rop_nr; rop++) {
770 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
771 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
772 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
773 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
774 }
775}
776
/* Upload one falcon's data and code segments through the transfer
 * registers at fuc_base.  Code is uploaded in 256-byte (64-word) pages;
 * the page index register (+0x188) is bumped at each page boundary. */
void
nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
		   struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
{
	int i;

	nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
	for (i = 0; i < data->size / 4; i++)
		nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);

	nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
	for (i = 0; i < code->size / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(priv, fuc_base + 0x0188, i >> 6);
		nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
	}
}
794
/* Bring up the context-control falcons (HUB at 0x409000, GPCs at
 * 0x41a000).  Two paths: external NVIDIA firmware (priv->firmware) with
 * its request/response mailbox protocol, or the built-in nvc0_grhub/
 * nvc0_grgpc ucode.  Either way, the context size is read back and the
 * golden context image is generated on first init. */
static int
nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
{
	u32 r000260;
	int i;

	if (priv->firmware) {
		/* load fuc microcode */
		r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
		nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c,
				   &priv->fuc409d);
		nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac,
				   &priv->fuc41ad);
		nv_wr32(priv, 0x000260, r000260);

		/* start both of them running */
		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x41a10c, 0x00000000);
		nv_wr32(priv, 0x40910c, 0x00000000);
		nv_wr32(priv, 0x41a100, 0x00000002);
		nv_wr32(priv, 0x409100, 0x00000002);
		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
			nv_info(priv, "0x409800 wait failed\n");

		/* mailbox requests to the HUB falcon: 0x409500 is the
		 * argument, 0x409504 the request, 0x409800 the response */
		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x7fffffff);
		nv_wr32(priv, 0x409504, 0x00000021);

		/* request 0x10: query context size */
		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x00000000);
		nv_wr32(priv, 0x409504, 0x00000010);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x10 timeout\n");
			return -EBUSY;
		}
		priv->size = nv_rd32(priv, 0x409800);

		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x00000000);
		nv_wr32(priv, 0x409504, 0x00000016);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x16 timeout\n");
			return -EBUSY;
		}

		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x00000000);
		nv_wr32(priv, 0x409504, 0x00000025);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x25 timeout\n");
			return -EBUSY;
		}

		/* generate the golden context image on first init */
		if (priv->data == NULL) {
			int ret = nvc0_grctx_generate(priv);
			if (ret) {
				nv_error(priv, "failed to construct context\n");
				return ret;
			}
		}

		return 0;
	}

	/* load HUB microcode */
	r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
	nv_wr32(priv, 0x4091c0, 0x01000000);
	for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
		nv_wr32(priv, 0x4091c4, nvc0_grhub_data[i]);

	nv_wr32(priv, 0x409180, 0x01000000);
	for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(priv, 0x409188, i >> 6);
		nv_wr32(priv, 0x409184, nvc0_grhub_code[i]);
	}

	/* load GPC microcode */
	nv_wr32(priv, 0x41a1c0, 0x01000000);
	for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
		nv_wr32(priv, 0x41a1c4, nvc0_grgpc_data[i]);

	nv_wr32(priv, 0x41a180, 0x01000000);
	for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(priv, 0x41a188, i >> 6);
		nv_wr32(priv, 0x41a184, nvc0_grgpc_code[i]);
	}
	nv_wr32(priv, 0x000260, r000260);

	/* start HUB ucode running, it'll init the GPCs */
	nv_wr32(priv, 0x409800, nv_device(priv)->chipset);
	nv_wr32(priv, 0x40910c, 0x00000000);
	nv_wr32(priv, 0x409100, 0x00000002);
	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
		nv_error(priv, "HUB_INIT timed out\n");
		nvc0_graph_ctxctl_debug(priv);
		return -EBUSY;
	}

	priv->size = nv_rd32(priv, 0x409804);
	/* generate the golden context image on first init */
	if (priv->data == NULL) {
		int ret = nvc0_grctx_generate(priv);
		if (ret) {
			nv_error(priv, "failed to construct context\n");
			return ret;
		}
	}

	return 0;
}
906
907static int
908nvc0_graph_init(struct nouveau_object *object)
909{
910 struct nvc0_graph_priv *priv = (void *)object;
911 int ret;
912
913 ret = nouveau_graph_init(&priv->base);
914 if (ret)
915 return ret;
916
917 nvc0_graph_init_obj418880(priv);
918 nvc0_graph_init_regs(priv);
919 /*nvc0_graph_init_unitplemented_magics(priv);*/
920 nvc0_graph_init_gpc_0(priv);
921 /*nvc0_graph_init_unitplemented_c242(priv);*/
922
923 nv_wr32(priv, 0x400500, 0x00010001);
924 nv_wr32(priv, 0x400100, 0xffffffff);
925 nv_wr32(priv, 0x40013c, 0xffffffff);
926
927 nvc0_graph_init_units(priv);
928 nvc0_graph_init_gpc_1(priv);
929 nvc0_graph_init_rop(priv);
930
931 nv_wr32(priv, 0x400108, 0xffffffff);
932 nv_wr32(priv, 0x400138, 0xffffffff);
933 nv_wr32(priv, 0x400118, 0xffffffff);
934 nv_wr32(priv, 0x400130, 0xffffffff);
935 nv_wr32(priv, 0x40011c, 0xffffffff);
936 nv_wr32(priv, 0x400134, 0xffffffff);
937 nv_wr32(priv, 0x400054, 0x34ce3464);
938
939 ret = nvc0_graph_init_ctxctl(priv);
940 if (ret)
941 return ret;
942
943 return 0;
944}
945
/* Engine-class descriptor for GF100-family PGRAPH: binds the nvc0-specific
 * ctor/dtor/init above to the generic graph-engine fini helper. */
struct nouveau_oclass
nvc0_graph_oclass = {
	.handle = NV_ENGINE(GR, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_graph_ctor,
		.dtor = nvc0_graph_dtor,
		.init = nvc0_graph_init,
		.fini = _nouveau_graph_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
new file mode 100644
index 000000000000..18d2210e12eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NVC0_GRAPH_H__
26#define __NVC0_GRAPH_H__
27
28#include <core/client.h>
29#include <core/handle.h>
30#include <core/gpuobj.h>
31#include <core/option.h>
32
33#include <subdev/fb.h>
34#include <subdev/vm.h>
35#include <subdev/bar.h>
36#include <subdev/timer.h>
37
38#include <engine/fifo.h>
39#include <engine/graph.h>
40
/* upper bounds used to size the per-GPC/per-TPC arrays below */
#define GPC_MAX 4
#define TPC_MAX 32

/* helpers forming MMIO addresses for broadcast and per-unit register
 * ranges: (u) = ROP index, (t) = GPC index, (m) = TPC index, (r) = offset */
#define ROP_BCAST(r)      (0x408800 + (r))
#define ROP_UNIT(u, r)    (0x410000 + (u) * 0x400 + (r))
#define GPC_BCAST(r)      (0x418000 + (r))
#define GPC_UNIT(t, r)    (0x500000 + (t) * 0x8000 + (r))
#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
49
/* Requirements for one context buffer: byte size, alignment, and access
 * flags (presumably VM access rights — confirm against grctx code). */
struct nvc0_graph_data {
	u32 size;
	u32 align;
	u32 access;
};

/* One entry of the context mmio patch list: 'data' is written to 'addr';
 * 'shift' and 'buffer' look like a relocation against one of the context
 * buffers — NOTE(review): confirm exact semantics in nvc0_grctx_mmio(). */
struct nvc0_graph_mmio {
	u32 addr;
	u32 data;
	u32 shift;
	u32 buffer;
};

/* A ctxctl falcon microcode image (one code or data segment). */
struct nvc0_graph_fuc {
	u32 *data;
	u32 size;
};
67
/* Private engine state shared by the NVC0 (Fermi) and NVE0 (Kepler)
 * PGRAPH implementations. */
struct nvc0_graph_priv {
	struct nouveau_graph base;

	/* externally-loaded ctxctl microcode images; only valid when
	 * 'firmware' is set (NvGrUseFW option).  409xx images are loaded
	 * at 0x409000 (HUB), 41axx at 0x41a000 (GPC). */
	struct nvc0_graph_fuc fuc409c;
	struct nvc0_graph_fuc fuc409d;
	struct nvc0_graph_fuc fuc41ac;
	struct nvc0_graph_fuc fuc41ad;
	bool firmware;

	/* topology probed from hardware at ctor time */
	u8 rop_nr;		/* number of ROP units */
	u8 gpc_nr;		/* number of GPCs */
	u8 tpc_nr[GPC_MAX];	/* TPC count per GPC */
	u8 tpc_total;		/* sum of tpc_nr[] */

	/* scratch buffers whose addresses are programmed into
	 * GPC_BCAST(0x08b4)/GPC_BCAST(0x08b8); purpose unknown */
	struct nouveau_gpuobj *unk4188b4;
	struct nouveau_gpuobj *unk4188b8;

	/* context buffer requirements and mmio patch list recorded while
	 * generating the golden context */
	struct nvc0_graph_data mmio_data[4];
	struct nvc0_graph_mmio mmio_list[4096/8];
	u32 size;		/* context image size, reported by ctxctl ucode */
	u32 *data;		/* golden context image; NULL until generated */

	/* chipset-dependent value fed to GPC_UNIT(gpc, 0x0914); exact
	 * meaning unknown (hence the name) */
	u8 magic_not_rop_nr;
};
92
/* Per-channel PGRAPH context state. */
struct nvc0_graph_chan {
	struct nouveau_graph_chan base;

	struct nouveau_gpuobj *mmio;	/* per-channel mmio patch list object */
	struct nouveau_vma mmio_vma;	/* its mapping in the channel's VM */
	int mmio_nr;			/* number of entries in 'mmio' */
	/* per-channel instances of the context buffers described by
	 * priv->mmio_data[], plus their VM mappings */
	struct {
		struct nouveau_gpuobj *mem;
		struct nouveau_vma vma;
	} data[4];
};
104
105static inline u32
106nvc0_graph_class(void *obj)
107{
108 struct nouveau_device *device = nv_device(obj);
109
110 switch (device->chipset) {
111 case 0xc0:
112 case 0xc3:
113 case 0xc4:
114 case 0xce: /* guess, mmio trace shows only 0x9097 state */
115 case 0xcf: /* guess, mmio trace shows only 0x9097 state */
116 return 0x9097;
117 case 0xc1:
118 return 0x9197;
119 case 0xc8:
120 case 0xd9:
121 return 0x9297;
122 case 0xe4:
123 case 0xe7:
124 return 0xa097;
125 default:
126 return 0;
127 }
128}
129
130void nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data);
131
/* Inject a single method call into PGRAPH via the 0x404488 interface:
 * data goes to 0x40448c first, then the trigger word (busy bit 31 set,
 * method at bits 14+, class in the low bits). */
static inline void
nv_mthd(struct nvc0_graph_priv *priv, u32 class, u32 mthd, u32 data)
{
	nv_wr32(priv, 0x40448c, data);
	nv_wr32(priv, 0x404488, 0x80000000 | (mthd << 14) | class);
}
138
/* Working state used while generating the golden context image. */
struct nvc0_grctx {
	struct nvc0_graph_priv *priv;
	struct nvc0_graph_data *data;	/* next free slot in priv->mmio_data[] */
	struct nvc0_graph_mmio *mmio;	/* next free slot in priv->mmio_list[] */
	struct nouveau_gpuobj *chan;	/* temporary channel used for generation */
	int buffer_nr;			/* buffers allocated so far */
	u64 buffer[4];			/* their addresses */
	u64 addr;
};
148
/* golden-context generation (nvc0.c/nve0.c grctx implementations) */
int nvc0_grctx_generate(struct nvc0_graph_priv *);
int nvc0_grctx_init(struct nvc0_graph_priv *, struct nvc0_grctx *);
void nvc0_grctx_data(struct nvc0_grctx *, u32, u32, u32);
void nvc0_grctx_mmio(struct nvc0_grctx *, u32, u32, u32, u32);
int nvc0_grctx_fini(struct nvc0_grctx *);

int nve0_grctx_generate(struct nvc0_graph_priv *);

/* shorthands for grctx code; both expect a local 'struct nvc0_grctx info'
 * to be in scope at the call site */
#define mmio_data(s,a,p) nvc0_grctx_data(&info, (s), (a), (p))
#define mmio_list(r,d,s,b) nvc0_grctx_mmio(&info, (r), (d), (s), (b))

/* shared helpers implemented in nvc0.c, reused by nve0.c */
void nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *);
int nvc0_graph_ctor_fw(struct nvc0_graph_priv *, const char *,
		       struct nvc0_graph_fuc *);
void nvc0_graph_dtor(struct nouveau_object *);
void nvc0_graph_init_fw(struct nvc0_graph_priv *, u32 base,
			struct nvc0_graph_fuc *, struct nvc0_graph_fuc *);
int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
			    struct nouveau_oclass *, void *, u32,
			    struct nouveau_object **);
void nvc0_graph_context_dtor(struct nouveau_object *);
171#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
new file mode 100644
index 000000000000..539d4c72f192
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -0,0 +1,576 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26#include "fuc/hubnve0.fuc.h"
27#include "fuc/gpcnve0.fuc.h"
28
29/*******************************************************************************
30 * Graphics object classes
31 ******************************************************************************/
32
/* Graphics object classes exposed to clients on NVE0 (Kepler).
 * NOTE(review): class roles inferred from IDs only (0x902d 2D, 0xa097 3D,
 * 0xa0c0 compute, 0xa0b5 copy) — confirm against class documentation. */
static struct nouveau_oclass
nve0_graph_sclass[] = {
	{ 0x902d, &nouveau_object_ofuncs },
	{ 0xa040, &nouveau_object_ofuncs },
	{ 0xa097, &nouveau_object_ofuncs },
	{ 0xa0c0, &nouveau_object_ofuncs },
	{ 0xa0b5, &nouveau_object_ofuncs },
	{}
};
42
43/*******************************************************************************
44 * PGRAPH context
45 ******************************************************************************/
46
/* Per-channel context class for NVE0; reuses the NVC0 context
 * constructor/destructor and the generic init/fini/rd32/wr32 paths. */
static struct nouveau_oclass
nve0_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0xe0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_graph_context_ctor,
		.dtor = nvc0_graph_context_dtor,
		.init = _nouveau_graph_context_init,
		.fini = _nouveau_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
59
60/*******************************************************************************
61 * PGRAPH engine/subdev functions
62 ******************************************************************************/
63
/* Service a CTXCTL ucode interrupt: log the known status bits from
 * 0x409c18, dump debug state, and ack everything by writing the status
 * back to 0x409c20. */
static void
nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
{
	u32 ustat = nv_rd32(priv, 0x409c18);

	if (ustat & 0x00000001)
		nv_error(priv, "CTXCTRL ucode error\n");
	if (ustat & 0x00080000)
		nv_error(priv, "CTXCTRL watchdog timeout\n");
	if (ustat & ~0x00080001)	/* anything we don't recognise */
		nv_error(priv, "CTXCTRL 0x%08x\n", ustat);

	nvc0_graph_ctxctl_debug(priv);
	nv_wr32(priv, 0x409c20, ustat);	/* ack */
}
79
/* Decode and acknowledge PGRAPH trap sources (0x400108) for a faulting
 * channel.  Each recognised source is logged, the unit's error state is
 * re-armed with 0xc0000000 (the same value programmed during init), and
 * the corresponding trap bit is acked in 0x400108. */
static void
nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
{
	u32 trap = nv_rd32(priv, 0x400108);
	int rop;

	if (trap & 0x00000001) {	/* DISPATCH */
		u32 stat = nv_rd32(priv, 0x404000);
		nv_error(priv, "DISPATCH ch %d [0x%010llx] 0x%08x\n",
			 chid, inst, stat);
		nv_wr32(priv, 0x404000, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000001);
		trap &= ~0x00000001;
	}

	if (trap & 0x00000010) {	/* SHADER */
		u32 stat = nv_rd32(priv, 0x405840);
		nv_error(priv, "SHADER ch %d [0x%010llx] 0x%08x\n",
			 chid, inst, stat);
		nv_wr32(priv, 0x405840, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x00000010);
		trap &= ~0x00000010;
	}

	if (trap & 0x02000000) {	/* ROP: report/clear each unit */
		for (rop = 0; rop < priv->rop_nr; rop++) {
			u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
			u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
			nv_error(priv, "ROP%d ch %d [0x%010llx] 0x%08x 0x%08x\n",
				 rop, chid, inst, statz, statc);
			nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
			nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
		}
		nv_wr32(priv, 0x400108, 0x02000000);
		trap &= ~0x02000000;
	}

	/* anything left is unrecognised; log it and ack anyway so the
	 * interrupt doesn't storm */
	if (trap) {
		nv_error(priv, "TRAP ch %d [0x%010llx] 0x%08x\n",
			 chid, inst, trap);
		nv_wr32(priv, 0x400108, trap);
	}
}
123
/* Top-level PGRAPH interrupt handler for NVE0: latches the trapped
 * channel/method state, decodes and acks each pending bit in 0x400100,
 * and re-enables fifo access (0x400500) on the way out. */
static void
nve0_graph_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_object *engctx;
	struct nouveau_handle *handle;
	struct nvc0_graph_priv *priv = (void *)subdev;
	/* state latched by hardware at the time of the fault */
	u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
	u32 stat = nv_rd32(priv, 0x400100);
	u32 addr = nv_rd32(priv, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nv_rd32(priv, 0x400708);
	u32 code = nv_rd32(priv, 0x400110);
	u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
	int chid;

	/* resolve the channel from its context instance address */
	engctx = nouveau_engctx_get(engine, inst);
	chid = pfifo->chid(pfifo, engctx);

	if (stat & 0x00000010) {
		/* ILLEGAL_MTHD: give a software object bound to this class
		 * a chance to service the method before reporting an error */
		handle = nouveau_handle_get_class(engctx, class);
		if (!handle || nv_call(handle->object, mthd, data)) {
			nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
				     "subc %d class 0x%04x mthd 0x%04x "
				     "data 0x%08x\n",
				 chid, inst, subc, class, mthd, data);
		}
		nouveau_handle_put(handle);
		nv_wr32(priv, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000020) {
		nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
			 chid, inst, subc, class, mthd, data);
		nv_wr32(priv, 0x400100, 0x00000020);
		stat &= ~0x00000020;
	}

	if (stat & 0x00100000) {
		/* DATA_ERROR: 'code' names the specific error; the printk
		 * below continues the line started by nv_error */
		nv_error(priv, "DATA_ERROR [");
		nouveau_enum_print(nv50_data_error_names, code);
		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
		       "mthd 0x%04x data 0x%08x\n",
		       chid, inst, subc, class, mthd, data);
		nv_wr32(priv, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	if (stat & 0x00200000) {	/* TRAP: decode via 0x400108 */
		nve0_graph_trap_isr(priv, chid, inst);
		nv_wr32(priv, 0x400100, 0x00200000);
		stat &= ~0x00200000;
	}

	if (stat & 0x00080000) {	/* CTXCTL ucode interrupt */
		nve0_graph_ctxctl_isr(priv);
		nv_wr32(priv, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		nv_error(priv, "unknown stat 0x%08x\n", stat);
		nv_wr32(priv, 0x400100, stat);
	}

	nv_wr32(priv, 0x400500, 0x00010001);	/* re-enable fifo access */
	nouveau_engctx_put(engctx);
}
196
/* Construct the NVE0 (Kepler) PGRAPH engine object: registers the
 * interrupt handler and classes, optionally loads external ctxctl
 * firmware (NvGrUseFW config option), allocates the two global scratch
 * buffers programmed into GPC_BCAST(0x08b4/0x08b8), and probes the
 * GPC/TPC/ROP topology from hardware.  Returns 0 or a negative errno. */
static int
nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nvc0_graph_priv *priv;
	int ret, i;

	ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x18001000;
	nv_subdev(priv)->intr = nve0_graph_intr;
	nv_engine(priv)->cclass = &nve0_graph_cclass;
	nv_engine(priv)->sclass = nve0_graph_sclass;

	/* optionally use external firmware for the ctxctl falcons; all
	 * four images must load or the ctor fails */
	if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
		nv_info(priv, "using external firmware\n");
		if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
		    nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
		    nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
		    nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
			return -EINVAL;
		priv->firmware = true;
	}

	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
	if (ret)
		return ret;

	/* fill both scratch buffers with a repeating 0x10 pattern;
	 * significance of the value is unknown */
	for (i = 0; i < 0x1000; i += 4) {
		nv_wo32(priv->unk4188b4, i, 0x00000010);
		nv_wo32(priv->unk4188b8, i, 0x00000010);
	}

	/* probe unit counts: GPCs and ROPs from 0x409604, TPCs per GPC
	 * from each GPC's 0x2608 register */
	priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f;
	priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
	for (i = 0; i < priv->gpc_nr; i++) {
		priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
		priv->tpc_total += priv->tpc_nr[i];
	}

	/* chipset/config-dependent value for GPC_UNIT(gpc, 0x0914);
	 * derivation is empirical (see field name) */
	switch (nv_device(priv)->chipset) {
	case 0xe4:
		if (priv->tpc_total == 8)
			priv->magic_not_rop_nr = 3;
		else
		if (priv->tpc_total == 7)
			priv->magic_not_rop_nr = 1;
		break;
	case 0xe7:
		priv->magic_not_rop_nr = 1;
		break;
	default:
		break;
	}

	return 0;
}
263
/* Program the global context buffer addresses into the GPC broadcast
 * range: clears 0x0880/0x08a4 and the four words at 0x0888, then writes
 * the (page-aligned, >> 8) addresses of the two scratch buffers. */
static void
nve0_graph_init_obj418880(struct nvc0_graph_priv *priv)
{
	int i;

	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
	for (i = 0; i < 4; i++)
		nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
}
276
/* Load the default PGRAPH register state for NVE0.  Values mirror what
 * the hardware expects after reset; individual meanings are not
 * documented here. */
static void
nve0_graph_init_regs(struct nvc0_graph_priv *priv)
{
	nv_wr32(priv, 0x400080, 0x003083c2);
	nv_wr32(priv, 0x400088, 0x0001ffe7);
	nv_wr32(priv, 0x40008c, 0x00000000);
	nv_wr32(priv, 0x400090, 0x00000030);
	nv_wr32(priv, 0x40013c, 0x003901f7);
	nv_wr32(priv, 0x400140, 0x00000100);
	nv_wr32(priv, 0x400144, 0x00000000);
	nv_wr32(priv, 0x400148, 0x00000110);
	nv_wr32(priv, 0x400138, 0x00000000);
	nv_wr32(priv, 0x400130, 0x00000000);
	nv_wr32(priv, 0x400134, 0x00000000);
	nv_wr32(priv, 0x400124, 0x00000002);
}
293
/* Initialise the shared PGRAPH units: ctxctl interrupt routing, then
 * arm error reporting (0xc0000000) on the dispatch/shader/etc. units.
 * The same 0xc0000000 value is rewritten by the trap handler to re-arm
 * a unit after an error. */
static void
nve0_graph_init_units(struct nvc0_graph_priv *priv)
{
	nv_wr32(priv, 0x409ffc, 0x00000000);
	nv_wr32(priv, 0x409c14, 0x00003e3e);
	nv_wr32(priv, 0x409c24, 0x000f0000);

	nv_wr32(priv, 0x404000, 0xc0000000);	/* DISPATCH */
	nv_wr32(priv, 0x404600, 0xc0000000);
	nv_wr32(priv, 0x408030, 0xc0000000);
	nv_wr32(priv, 0x404490, 0xc0000000);
	nv_wr32(priv, 0x406018, 0xc0000000);
	nv_wr32(priv, 0x407020, 0xc0000000);
	nv_wr32(priv, 0x405840, 0xc0000000);	/* SHADER */
	nv_wr32(priv, 0x405844, 0x00ffffff);

	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);

}
314
/* Distribute TPCs round-robin across the GPCs and program the resulting
 * mapping tables, per-GPC TPC counts and the 918 "magic" divisor
 * (0x00800000 / tpc_total, rounded up). */
static void
nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
{
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
	u32 data[TPC_MAX / 8];	/* 4 bits per TPC, 8 entries per word */
	u8 tpcnr[GPC_MAX];
	int i, gpc, tpc;

	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);

	/* assign each global TPC index to the next GPC that still has
	 * TPCs available, recording the per-GPC TPC slot in data[] */
	memset(data, 0x00, sizeof(data));
	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % priv->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	/* broadcast the packed mapping table to every GPC */
	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
			priv->tpc_nr[gpc]);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
}
351
/* Second-stage GPC init: arm error reporting (0xc0000000) on each GPC
 * and each of its TPCs, and unmask their interrupt sources. */
static void
nve0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
{
	int gpc, tpc;

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
		}
		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}
}
376
377static void
378nve0_graph_init_rop(struct nvc0_graph_priv *priv)
379{
380 int rop;
381
382 for (rop = 0; rop < priv->rop_nr; rop++) {
383 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
384 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
385 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
386 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
387 }
388}
389
/* Upload and boot the context-control (ctxctl) microcode, then obtain
 * the context image size and build the golden context if not already
 * done.  Two paths: external firmware (priv->firmware) uses a request/
 * response handshake through 0x409500/0x409504; the built-in ucode path
 * uploads the compiled-in HUB and GPC images and waits for HUB init.
 * Returns 0 on success, -EBUSY on a handshake/init timeout, or the
 * error from context generation. */
static int
nve0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
{
	u32 r000260;
	int i;

	if (priv->firmware) {
		/* load fuc microcode */
		r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
		nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, &priv->fuc409d);
		nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
		nv_wr32(priv, 0x000260, r000260);

		/* start both of them running */
		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x41a10c, 0x00000000);
		nv_wr32(priv, 0x40910c, 0x00000000);
		nv_wr32(priv, 0x41a100, 0x00000002);
		nv_wr32(priv, 0x409100, 0x00000002);
		/* non-fatal: only warn if the falcons don't signal ready */
		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
			nv_error(priv, "0x409800 wait failed\n");

		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x7fffffff);
		nv_wr32(priv, 0x409504, 0x00000021);

		/* request 0x10: reply in 0x409800 is the context size */
		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x00000000);
		nv_wr32(priv, 0x409504, 0x00000010);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x10 timeout\n");
			return -EBUSY;
		}
		priv->size = nv_rd32(priv, 0x409800);

		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x00000000);
		nv_wr32(priv, 0x409504, 0x00000016);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x16 timeout\n");
			return -EBUSY;
		}

		nv_wr32(priv, 0x409840, 0xffffffff);
		nv_wr32(priv, 0x409500, 0x00000000);
		nv_wr32(priv, 0x409504, 0x00000025);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x25 timeout\n");
			return -EBUSY;
		}

		nv_wr32(priv, 0x409800, 0x00000000);
		nv_wr32(priv, 0x409500, 0x00000001);
		nv_wr32(priv, 0x409504, 0x00000030);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x30 timeout\n");
			return -EBUSY;
		}

		/* requests 0x31/0x32 take an argument through 0x409810;
		 * the values are from mmio traces, meaning unknown */
		nv_wr32(priv, 0x409810, 0xb00095c8);
		nv_wr32(priv, 0x409800, 0x00000000);
		nv_wr32(priv, 0x409500, 0x00000001);
		nv_wr32(priv, 0x409504, 0x00000031);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x31 timeout\n");
			return -EBUSY;
		}

		nv_wr32(priv, 0x409810, 0x00080420);
		nv_wr32(priv, 0x409800, 0x00000000);
		nv_wr32(priv, 0x409500, 0x00000001);
		nv_wr32(priv, 0x409504, 0x00000032);
		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
			nv_error(priv, "fuc09 req 0x32 timeout\n");
			return -EBUSY;
		}

		nv_wr32(priv, 0x409614, 0x00000070);
		nv_wr32(priv, 0x409614, 0x00000770);
		nv_wr32(priv, 0x40802c, 0x00000001);

		/* build the golden context image on first init */
		if (priv->data == NULL) {
			int ret = nve0_grctx_generate(priv);
			if (ret) {
				nv_error(priv, "failed to construct context\n");
				return ret;
			}
		}

		return 0;
	}

	/* load HUB microcode */
	r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
	nv_wr32(priv, 0x4091c0, 0x01000000);
	for (i = 0; i < sizeof(nve0_grhub_data) / 4; i++)
		nv_wr32(priv, 0x4091c4, nve0_grhub_data[i]);

	nv_wr32(priv, 0x409180, 0x01000000);
	for (i = 0; i < sizeof(nve0_grhub_code) / 4; i++) {
		if ((i & 0x3f) == 0)	/* new 256-byte code page */
			nv_wr32(priv, 0x409188, i >> 6);
		nv_wr32(priv, 0x409184, nve0_grhub_code[i]);
	}

	/* load GPC microcode */
	nv_wr32(priv, 0x41a1c0, 0x01000000);
	for (i = 0; i < sizeof(nve0_grgpc_data) / 4; i++)
		nv_wr32(priv, 0x41a1c4, nve0_grgpc_data[i]);

	nv_wr32(priv, 0x41a180, 0x01000000);
	for (i = 0; i < sizeof(nve0_grgpc_code) / 4; i++) {
		if ((i & 0x3f) == 0)	/* new 256-byte code page */
			nv_wr32(priv, 0x41a188, i >> 6);
		nv_wr32(priv, 0x41a184, nve0_grgpc_code[i]);
	}
	nv_wr32(priv, 0x000260, r000260);

	/* start HUB ucode running, it'll init the GPCs */
	nv_wr32(priv, 0x409800, nv_device(priv)->chipset);
	nv_wr32(priv, 0x40910c, 0x00000000);
	nv_wr32(priv, 0x409100, 0x00000002);
	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
		nv_error(priv, "HUB_INIT timed out\n");
		nvc0_graph_ctxctl_debug(priv);
		return -EBUSY;
	}

	/* ucode reports the context size; build the golden context once */
	priv->size = nv_rd32(priv, 0x409804);
	if (priv->data == NULL) {
		int ret = nve0_grctx_generate(priv);
		if (ret) {
			nv_error(priv, "failed to construct context\n");
			return ret;
		}
	}

	return 0;
}
529
/* One-time PGRAPH bring-up for NVE0 (Kepler); same sequence as the NVC0
 * path: context buffers, default regs, GPC/TPC/ROP setup, interrupt
 * unmasking, then ctxctl boot.  Returns 0 or a negative errno. */
static int
nve0_graph_init(struct nouveau_object *object)
{
	struct nvc0_graph_priv *priv = (void *)object;
	int ret;

	/* bring the generic graphics-engine base object up first */
	ret = nouveau_graph_init(&priv->base);
	if (ret)
		return ret;

	nve0_graph_init_obj418880(priv);	/* global context buffer addresses */
	nve0_graph_init_regs(priv);
	nve0_graph_init_gpc_0(priv);		/* TPC-to-GPC mapping tables */

	/* enable fifo access, then ack and unmask pending interrupts */
	nv_wr32(priv, 0x400500, 0x00010001);
	nv_wr32(priv, 0x400100, 0xffffffff);
	nv_wr32(priv, 0x40013c, 0xffffffff);

	nve0_graph_init_units(priv);
	nve0_graph_init_gpc_1(priv);		/* per-GPC/TPC error-report enables */
	nve0_graph_init_rop(priv);

	/* ack any latched trap state and unmask trap/error reporting */
	nv_wr32(priv, 0x400108, 0xffffffff);
	nv_wr32(priv, 0x400138, 0xffffffff);
	nv_wr32(priv, 0x400118, 0xffffffff);
	nv_wr32(priv, 0x400130, 0xffffffff);
	nv_wr32(priv, 0x40011c, 0xffffffff);
	nv_wr32(priv, 0x400134, 0xffffffff);
	nv_wr32(priv, 0x400054, 0x34ce3464);	/* NOTE(review): magic value, meaning not derivable here */

	/* upload and start the HUB/GPC ctxctl microcode */
	ret = nve0_graph_init_ctxctl(priv);
	if (ret)
		return ret;

	return 0;
}
566
/* Engine class description for NVE0 (Kepler) PGRAPH: Kepler ctor/init
 * paths combined with the shared NVC0 destructor and generic fini. */
struct nouveau_oclass
nve0_graph_oclass = {
	.handle = NV_ENGINE(GR, 0xe0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_graph_ctor,
		.dtor = nvc0_graph_dtor,
		.init = nve0_graph_init,
		.fini = _nouveau_graph_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
new file mode 100644
index 000000000000..9c715a25cecb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -0,0 +1,269 @@
1#ifndef __NOUVEAU_GRAPH_REGS_H__
2#define __NOUVEAU_GRAPH_REGS_H__
3
4#define NV04_PGRAPH_DEBUG_0 0x00400080
5#define NV04_PGRAPH_DEBUG_1 0x00400084
6#define NV04_PGRAPH_DEBUG_2 0x00400088
7#define NV04_PGRAPH_DEBUG_3 0x0040008c
8#define NV10_PGRAPH_DEBUG_4 0x00400090
9#define NV03_PGRAPH_INTR 0x00400100
10#define NV03_PGRAPH_NSTATUS 0x00400104
11# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
12# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
13# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
14# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
15# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
16# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
17# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
18# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
19#define NV03_PGRAPH_NSOURCE 0x00400108
20# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0)
21# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1)
22# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2)
23# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3)
24# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4)
25# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5)
26# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6)
27# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7)
28# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8)
29# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9)
30# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
31# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
32# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
33# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
34# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
35# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
36# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
37# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
38# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
39#define NV03_PGRAPH_INTR_EN 0x00400140
40#define NV40_PGRAPH_INTR_EN 0x0040013C
41# define NV_PGRAPH_INTR_NOTIFY (1<<0)
42# define NV_PGRAPH_INTR_MISSING_HW (1<<4)
43# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
44# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
45# define NV_PGRAPH_INTR_ERROR (1<<20)
46#define NV10_PGRAPH_CTX_CONTROL 0x00400144
47#define NV10_PGRAPH_CTX_USER 0x00400148
48#define NV10_PGRAPH_CTX_SWITCH(i) (0x0040014C + 0x4*(i))
49#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
50#define NV10_PGRAPH_CTX_CACHE(i, j) (0x00400160 \
51 + 0x4*(i) + 0x20*(j))
52#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
53#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
54#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
55#define NV04_PGRAPH_CTX_CONTROL 0x00400170
56#define NV04_PGRAPH_CTX_USER 0x00400174
57#define NV04_PGRAPH_CTX_CACHE1 0x00400180
58#define NV03_PGRAPH_CTX_CONTROL 0x00400190
59#define NV03_PGRAPH_CTX_USER 0x00400194
60#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
61#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
62#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
63#define NV40_PGRAPH_CTXCTL_0304 0x00400304
64#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
65#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
66#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
67#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
68#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
69#define NV40_PGRAPH_CTXCTL_0310 0x00400310
70#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
71#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
72#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
73#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
74#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
75#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
76#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
77#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF
78#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330
79#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff
80#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c
81#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000
82#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff
83#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330
84#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff
85#define NV03_PGRAPH_ABS_X_RAM 0x00400400
86#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
87#define NV03_PGRAPH_X_MISC 0x00400500
88#define NV03_PGRAPH_Y_MISC 0x00400504
89#define NV04_PGRAPH_VALID1 0x00400508
90#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
91#define NV04_PGRAPH_MISC24_0 0x00400510
92#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
93#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
94#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
95#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
96#define NV03_PGRAPH_CLIPX_0 0x00400524
97#define NV03_PGRAPH_CLIPX_1 0x00400528
98#define NV03_PGRAPH_CLIPY_0 0x0040052C
99#define NV03_PGRAPH_CLIPY_1 0x00400530
100#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
101#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
102#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
103#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
104#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
105#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
106#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
107#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
108#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
109#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
110#define NV04_PGRAPH_MISC24_1 0x00400570
111#define NV04_PGRAPH_MISC24_2 0x00400574
112#define NV04_PGRAPH_VALID2 0x00400578
113#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
114#define NV04_PGRAPH_PASSTHRU_1 0x00400580
115#define NV04_PGRAPH_PASSTHRU_2 0x00400584
116#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
117#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
118#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
119#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
120#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
121#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
122#define NV04_PGRAPH_FORMAT_0 0x004005A8
123#define NV04_PGRAPH_FORMAT_1 0x004005AC
124#define NV04_PGRAPH_FILTER_0 0x004005B0
125#define NV04_PGRAPH_FILTER_1 0x004005B4
126#define NV03_PGRAPH_MONO_COLOR0 0x00400600
127#define NV04_PGRAPH_ROP3 0x00400604
128#define NV04_PGRAPH_BETA_AND 0x00400608
129#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
130#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
131#define NV04_PGRAPH_FORMATS 0x00400618
132#define NV10_PGRAPH_DEBUG_2 0x00400620
133#define NV04_PGRAPH_BOFFSET0 0x00400640
134#define NV04_PGRAPH_BOFFSET1 0x00400644
135#define NV04_PGRAPH_BOFFSET2 0x00400648
136#define NV04_PGRAPH_BOFFSET3 0x0040064C
137#define NV04_PGRAPH_BOFFSET4 0x00400650
138#define NV04_PGRAPH_BOFFSET5 0x00400654
139#define NV04_PGRAPH_BBASE0 0x00400658
140#define NV04_PGRAPH_BBASE1 0x0040065C
141#define NV04_PGRAPH_BBASE2 0x00400660
142#define NV04_PGRAPH_BBASE3 0x00400664
143#define NV04_PGRAPH_BBASE4 0x00400668
144#define NV04_PGRAPH_BBASE5 0x0040066C
145#define NV04_PGRAPH_BPITCH0 0x00400670
146#define NV04_PGRAPH_BPITCH1 0x00400674
147#define NV04_PGRAPH_BPITCH2 0x00400678
148#define NV04_PGRAPH_BPITCH3 0x0040067C
149#define NV04_PGRAPH_BPITCH4 0x00400680
150#define NV04_PGRAPH_BLIMIT0 0x00400684
151#define NV04_PGRAPH_BLIMIT1 0x00400688
152#define NV04_PGRAPH_BLIMIT2 0x0040068C
153#define NV04_PGRAPH_BLIMIT3 0x00400690
154#define NV04_PGRAPH_BLIMIT4 0x00400694
155#define NV04_PGRAPH_BLIMIT5 0x00400698
156#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
157#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
158#define NV03_PGRAPH_STATUS 0x004006B0
159#define NV04_PGRAPH_STATUS 0x00400700
160# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000
161#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
162#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
163#define NV04_PGRAPH_SURFACE 0x0040070C
164#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
165#define NV04_PGRAPH_STATE 0x00400710
166#define NV10_PGRAPH_SURFACE 0x00400710
167#define NV04_PGRAPH_NOTIFY 0x00400714
168#define NV10_PGRAPH_STATE 0x00400714
169#define NV10_PGRAPH_NOTIFY 0x00400718
170
171#define NV04_PGRAPH_FIFO 0x00400720
172
173#define NV04_PGRAPH_BPIXEL 0x00400724
174#define NV10_PGRAPH_RDI_INDEX 0x00400750
175#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
176#define NV10_PGRAPH_RDI_DATA 0x00400754
177#define NV04_PGRAPH_DMA_PITCH 0x00400760
178#define NV10_PGRAPH_FFINTFC_FIFO_PTR 0x00400760
179#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
180#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
181#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
182#define NV10_PGRAPH_FFINTFC_ST2_DL 0x00400768
183#define NV10_PGRAPH_FFINTFC_ST2_DH 0x0040076c
184#define NV10_PGRAPH_DMA_PITCH 0x00400770
185#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
186#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
187#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
188#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
189#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
190#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
191#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
192#define NV04_PGRAPH_PATT_COLOR0 0x00400800
193#define NV04_PGRAPH_PATT_COLOR1 0x00400804
194#define NV04_PGRAPH_PATTERN 0x00400808
195#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
196#define NV04_PGRAPH_CHROMA 0x00400814
197#define NV04_PGRAPH_CONTROL0 0x00400818
198#define NV04_PGRAPH_CONTROL1 0x0040081C
199#define NV04_PGRAPH_CONTROL2 0x00400820
200#define NV04_PGRAPH_BLEND 0x00400824
201#define NV04_PGRAPH_STORED_FMT 0x00400830
202#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
203#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
204#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
205#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
206#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
207#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
208#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
209#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
210#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
211#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
212#define NV04_PGRAPH_U_RAM 0x00400D00
213#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
214#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
215#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
216#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
217#define NV04_PGRAPH_V_RAM 0x00400D40
218#define NV04_PGRAPH_W_RAM 0x00400D80
219#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
220#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
221#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
222#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
223#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
224#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
225#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
226#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
227#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
228#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
229#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
230#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
231#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
232#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
233#define NV10_PGRAPH_XFMODE0 0x00400F40
234#define NV10_PGRAPH_XFMODE1 0x00400F44
235#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
236#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
237#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
238#define NV10_PGRAPH_PIPE_DATA 0x00400F54
239#define NV04_PGRAPH_DMA_START_0 0x00401000
240#define NV04_PGRAPH_DMA_START_1 0x00401004
241#define NV04_PGRAPH_DMA_LENGTH 0x00401008
242#define NV04_PGRAPH_DMA_MISC 0x0040100C
243#define NV04_PGRAPH_DMA_DATA_0 0x00401020
244#define NV04_PGRAPH_DMA_DATA_1 0x00401024
245#define NV04_PGRAPH_DMA_RM 0x00401030
246#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
247#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
248#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
249#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
250#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
251#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
252#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
253#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
254#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
255#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
256#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
257#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
258#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
259#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
260#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
261#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
262#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
263#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
264#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
265#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
266#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
267#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
268
269#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
new file mode 100644
index 000000000000..1f394a2629e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -0,0 +1,308 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/handle.h>
29
30#include <subdev/fb.h>
31#include <subdev/timer.h>
32#include <subdev/instmem.h>
33
34#include <engine/fifo.h>
35#include <engine/mpeg.h>
36#include <engine/graph/nv40.h>
37
/* Engine-private state for NV31-generation PMPEG. */
struct nv31_mpeg_priv {
	struct nouveau_mpeg base;
	/* 0/1 gate: these chips support a single active PMPEG context at a
	 * time; the context ctor takes this, the dtor releases it */
	atomic_t refcount;
};

/* Per-channel context: no hardware state, just a core object so the
 * single-context gate above can be tracked per channel. */
struct nv31_mpeg_chan {
	struct nouveau_object base;
};
46
47/*******************************************************************************
48 * MPEG object classes
49 ******************************************************************************/
50
51static int
52nv31_mpeg_object_ctor(struct nouveau_object *parent,
53 struct nouveau_object *engine,
54 struct nouveau_oclass *oclass, void *data, u32 size,
55 struct nouveau_object **pobject)
56{
57 struct nouveau_gpuobj *obj;
58 int ret;
59
60 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
61 20, 16, 0, &obj);
62 *pobject = nv_object(obj);
63 if (ret)
64 return ret;
65
66 nv_wo32(obj, 0x00, nv_mclass(obj));
67 nv_wo32(obj, 0x04, 0x00000000);
68 nv_wo32(obj, 0x08, 0x00000000);
69 nv_wo32(obj, 0x0c, 0x00000000);
70 return 0;
71}
72
/* Software handler for the DMA_* methods (0x0190/0x01a0/0x01b0): decode the
 * referenced DMA object from instance memory and program the corresponding
 * PMPEG base/limit registers directly. */
static int
nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
{
	struct nouveau_instmem *imem = nouveau_instmem(object);
	struct nv31_mpeg_priv *priv = (void *)object->engine;
	/* method argument is the DMA object's instance address >> 4 */
	u32 inst = *(u32 *)arg << 4;
	u32 dma0 = nv_ro32(imem, inst + 0);
	u32 dma1 = nv_ro32(imem, inst + 4);
	u32 dma2 = nv_ro32(imem, inst + 8);
	/* reassemble base address from the split dword fields; dma1 holds
	 * limit, so size = limit + 1 */
	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
	u32 size = dma1 + 1;

	/* only allow linear DMA objects */
	if (!(dma0 & 0x00002000))
		return -EINVAL;

	if (mthd == 0x0190) {
		/* DMA_CMD: select memory target, then program base/size */
		nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
		nv_wr32(priv, 0x00b334, base);
		nv_wr32(priv, 0x00b324, size);
	} else
	if (mthd == 0x01a0) {
		/* DMA_DATA: same target bits, shifted into the DATA field */
		nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
		nv_wr32(priv, 0x00b360, base);
		nv_wr32(priv, 0x00b364, size);
	} else {
		/* DMA_IMAGE, VRAM only */
		if (dma0 & 0x000c0000)
			return -EINVAL;

		nv_wr32(priv, 0x00b370, base);
		nv_wr32(priv, 0x00b374, size);
	}

	return 0;
}
111
/* Object funcs for the 0x3174 class: custom ctor, stock gpuobj everything
 * else. */
static struct nouveau_ofuncs
nv31_mpeg_ofuncs = {
	.ctor = nv31_mpeg_object_ctor,
	.dtor = _nouveau_gpuobj_dtor,
	.init = _nouveau_gpuobj_init,
	.fini = _nouveau_gpuobj_fini,
	.rd32 = _nouveau_gpuobj_rd32,
	.wr32 = _nouveau_gpuobj_wr32,
};

/* All three DMA binding methods route to the same software handler. */
static struct nouveau_omthds
nv31_mpeg_omthds[] = {
	{ 0x0190, nv31_mpeg_mthd_dma },
	{ 0x01a0, nv31_mpeg_mthd_dma },
	{ 0x01b0, nv31_mpeg_mthd_dma },
	{}
};

/* Single supported object class (MPEG decoder, 0x3174). */
struct nouveau_oclass
nv31_mpeg_sclass[] = {
	{ 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
	{}
};
135
136/*******************************************************************************
137 * PMPEG context
138 ******************************************************************************/
139
140static int
141nv31_mpeg_context_ctor(struct nouveau_object *parent,
142 struct nouveau_object *engine,
143 struct nouveau_oclass *oclass, void *data, u32 size,
144 struct nouveau_object **pobject)
145{
146 struct nv31_mpeg_priv *priv = (void *)engine;
147 struct nv31_mpeg_chan *chan;
148 int ret;
149
150 if (!atomic_add_unless(&priv->refcount, 1, 1))
151 return -EBUSY;
152
153 ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
154 *pobject = nv_object(chan);
155 if (ret)
156 return ret;
157
158 return 0;
159}
160
161static void
162nv31_mpeg_context_dtor(struct nouveau_object *object)
163{
164 struct nv31_mpeg_priv *priv = (void *)object->engine;
165 struct nv31_mpeg_chan *chan = (void *)object;
166 atomic_dec(&priv->refcount);
167 nouveau_object_destroy(&chan->base);
168}
169
/* Context class: plain core object lifecycle plus the refcount gating in
 * ctor/dtor above. */
static struct nouveau_oclass
nv31_mpeg_cclass = {
	.handle = NV_ENGCTX(MPEG, 0x31),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv31_mpeg_context_ctor,
		.dtor = nv31_mpeg_context_dtor,
		.init = nouveau_object_init,
		.fini = nouveau_object_fini,
	},
};
180
181/*******************************************************************************
182 * PMPEG engine/subdev functions
183 ******************************************************************************/
184
185void
186nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
187{
188 struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
189 struct nv31_mpeg_priv *priv = (void *)engine;
190
191 nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
192 nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
193 nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
194}
195
/* PMPEG interrupt handler: decode the trapped method, dispatch software
 * methods to the bound 0x3174 object, then ack.  Any condition not handled
 * here is logged with channel/instance context. */
void
nv31_mpeg_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_object *engctx;
	struct nouveau_handle *handle;
	struct nv31_mpeg_priv *priv = (void *)subdev;
	/* current context instance (stored >> 4 in hw) and trap details */
	u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
	u32 stat = nv_rd32(priv, 0x00b100);
	u32 type = nv_rd32(priv, 0x00b230);
	u32 mthd = nv_rd32(priv, 0x00b234);
	u32 data = nv_rd32(priv, 0x00b238);
	u32 show = stat;	/* bits still unexplained after handling */
	int chid;

	engctx = nouveau_engctx_get(engine, inst);
	chid = pfifo->chid(pfifo, engctx);

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			/* NOTE(review): mask of 0/0 is a read-modify-write
			 * no-op on 0x00b308 — presumably just a register
			 * poke/flush; confirm against hw docs */
			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
			show &= ~0x01000000;
		}

		if (type == 0x00000010) {
			/* software method: forward to the 0x3174 object */
			handle = nouveau_handle_get_class(engctx, 0x3174);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~0x01000000;
			nouveau_handle_put(handle);
		}
	}

	/* ack the interrupt and re-arm method decoding */
	nv_wr32(priv, 0x00b100, stat);
	nv_wr32(priv, 0x00b230, 0x00000001);

	if (show) {
		nv_error(priv, "ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 chid, inst << 4, stat, type, mthd, data);
	}

	nouveau_engctx_put(engctx);
}
240
241static int
242nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
243 struct nouveau_oclass *oclass, void *data, u32 size,
244 struct nouveau_object **pobject)
245{
246 struct nv31_mpeg_priv *priv;
247 int ret;
248
249 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
250 *pobject = nv_object(priv);
251 if (ret)
252 return ret;
253
254 nv_subdev(priv)->unit = 0x00000002;
255 nv_subdev(priv)->intr = nv31_mpeg_intr;
256 nv_engine(priv)->cclass = &nv31_mpeg_cclass;
257 nv_engine(priv)->sclass = nv31_mpeg_sclass;
258 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
259 return 0;
260}
261
/* Bring PMPEG out of reset: program VPE clocks(?), mirror fb tiling,
 * configure the engine class and wait for idle.  Register values follow
 * what the NVIDIA binary driver programs. */
int
nv31_mpeg_init(struct nouveau_object *object)
{
	struct nouveau_engine *engine = nv_engine(object->engine);
	struct nv31_mpeg_priv *priv = (void *)engine;
	struct nouveau_fb *pfb = nouveau_fb(object);
	int ret, i;

	ret = nouveau_mpeg_init(&priv->base);
	if (ret)
		return ret;

	/* VPE init */
	nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
	nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */

	/* mirror every framebuffer tile region into PMPEG */
	for (i = 0; i < pfb->tile.regions; i++)
		engine->tile_prog(engine, i);

	/* PMPEG init */
	nv_wr32(priv, 0x00b32c, 0x00000000);
	nv_wr32(priv, 0x00b314, 0x00000100);
	/* nv44-family cores identify as 0x44, everything else as 0x31 */
	nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
	nv_wr32(priv, 0x00b300, 0x02001ec1);
	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);

	/* ack any stale interrupts and unmask everything */
	nv_wr32(priv, 0x00b100, 0xffffffff);
	nv_wr32(priv, 0x00b140, 0xffffffff);

	/* wait for the engine to report idle */
	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
		return -EBUSY;
	}

	return 0;
}
298
/* Engine class descriptor for NV31 PMPEG: custom ctor/init, stock mpeg
 * dtor/fini. */
struct nouveau_oclass
nv31_mpeg_oclass = {
	.handle = NV_ENGINE(MPEG, 0x31),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv31_mpeg_ctor,
		.dtor = _nouveau_mpeg_dtor,
		.init = nv31_mpeg_init,
		.fini = _nouveau_mpeg_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
new file mode 100644
index 000000000000..12418574efea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -0,0 +1,144 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <subdev/fb.h>
30#include <subdev/timer.h>
31#include <subdev/instmem.h>
32
33#include <engine/mpeg.h>
34#include <engine/graph/nv40.h>
35
36struct nv40_mpeg_priv {
37 struct nouveau_mpeg base;
38};
39
40struct nv40_mpeg_chan {
41 struct nouveau_mpeg base;
42};
43
44/*******************************************************************************
45 * PMPEG context
46 ******************************************************************************/
47
48static int
49nv40_mpeg_context_ctor(struct nouveau_object *parent,
50 struct nouveau_object *engine,
51 struct nouveau_oclass *oclass, void *data, u32 size,
52 struct nouveau_object **pobject)
53{
54 struct nv40_mpeg_chan *chan;
55 int ret;
56
57 ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
58 264 * 4, 16,
59 NVOBJ_FLAG_ZERO_ALLOC, &chan);
60 *pobject = nv_object(chan);
61 if (ret)
62 return ret;
63
64 return 0;
65}
66
67static int
68nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend)
69{
70
71 struct nv40_mpeg_priv *priv = (void *)object->engine;
72 struct nv40_mpeg_chan *chan = (void *)object;
73 u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
74
75 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
76 if (nv_rd32(priv, 0x00b318) == inst)
77 nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
78 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
79 return 0;
80}
81
/* Context class: custom ctor (context size) and fini (hw unbind); stock
 * mpeg context handlers otherwise. */
static struct nouveau_oclass
nv40_mpeg_cclass = {
	.handle = NV_ENGCTX(MPEG, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_mpeg_context_ctor,
		.dtor = _nouveau_mpeg_context_dtor,
		.init = _nouveau_mpeg_context_init,
		.fini = nv40_mpeg_context_fini,
		.rd32 = _nouveau_mpeg_context_rd32,
		.wr32 = _nouveau_mpeg_context_wr32,
	},
};
94
95/*******************************************************************************
96 * PMPEG engine/subdev functions
97 ******************************************************************************/
98
99static void
100nv40_mpeg_intr(struct nouveau_subdev *subdev)
101{
102 struct nv40_mpeg_priv *priv = (void *)subdev;
103 u32 stat;
104
105 if ((stat = nv_rd32(priv, 0x00b100)))
106 nv31_mpeg_intr(subdev);
107
108 if ((stat = nv_rd32(priv, 0x00b800))) {
109 nv_error(priv, "PMSRCH 0x%08x\n", stat);
110 nv_wr32(priv, 0x00b800, stat);
111 }
112}
113
114static int
115nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
116 struct nouveau_oclass *oclass, void *data, u32 size,
117 struct nouveau_object **pobject)
118{
119 struct nv40_mpeg_priv *priv;
120 int ret;
121
122 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
123 *pobject = nv_object(priv);
124 if (ret)
125 return ret;
126
127 nv_subdev(priv)->unit = 0x00000002;
128 nv_subdev(priv)->intr = nv40_mpeg_intr;
129 nv_engine(priv)->cclass = &nv40_mpeg_cclass;
130 nv_engine(priv)->sclass = nv31_mpeg_sclass;
131 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
132 return 0;
133}
134
/* Engine class descriptor for NV40 PMPEG; init is shared with NV31. */
struct nouveau_oclass
nv40_mpeg_oclass = {
	.handle = NV_ENGINE(MPEG, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_mpeg_ctor,
		.dtor = _nouveau_mpeg_dtor,
		.init = nv31_mpeg_init,
		.fini = _nouveau_mpeg_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
new file mode 100644
index 000000000000..8678a9996d57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -0,0 +1,240 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <subdev/vm.h>
30#include <subdev/bar.h>
31#include <subdev/timer.h>
32
33#include <engine/mpeg.h>
34
/* Engine-private state for NV50-generation PMPEG. */
struct nv50_mpeg_priv {
	struct nouveau_mpeg base;
};

/* Per-channel context (backed by a gpuobj via the mpeg context helpers). */
struct nv50_mpeg_chan {
	struct nouveau_mpeg_chan base;
};
42
43/*******************************************************************************
44 * MPEG object classes
45 ******************************************************************************/
46
47static int
48nv50_mpeg_object_ctor(struct nouveau_object *parent,
49 struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{
53 struct nouveau_gpuobj *obj;
54 int ret;
55
56 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
57 16, 16, 0, &obj);
58 *pobject = nv_object(obj);
59 if (ret)
60 return ret;
61
62 nv_wo32(obj, 0x00, nv_mclass(obj));
63 nv_wo32(obj, 0x04, 0x00000000);
64 nv_wo32(obj, 0x08, 0x00000000);
65 nv_wo32(obj, 0x0c, 0x00000000);
66 return 0;
67}
68
/* Object funcs shared with nv84: custom ctor, stock gpuobj handlers.
 * Non-static so the nv84 implementation can reuse them. */
struct nouveau_ofuncs
nv50_mpeg_ofuncs = {
	.ctor = nv50_mpeg_object_ctor,
	.dtor = _nouveau_gpuobj_dtor,
	.init = _nouveau_gpuobj_init,
	.fini = _nouveau_gpuobj_fini,
	.rd32 = _nouveau_gpuobj_rd32,
	.wr32 = _nouveau_gpuobj_wr32,
};

/* NV50 exposes the 0x3174 MPEG class. */
static struct nouveau_oclass
nv50_mpeg_sclass[] = {
	{ 0x3174, &nv50_mpeg_ofuncs },
	{}
};
84
85/*******************************************************************************
86 * PMPEG context
87 ******************************************************************************/
88
/* Allocate the NV50 PMPEG channel context (128 dwords, zeroed) and seed the
 * two non-zero context words.  Shared with nv84. */
int
nv50_mpeg_context_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_mpeg_chan *chan;
	int ret;

	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
					  0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* initial context image; values mirror the 0x00b300 engine config
	 * programmed in nv50_mpeg_init */
	nv_wo32(chan, 0x0070, 0x00801ec1);
	nv_wo32(chan, 0x007c, 0x0000037c);
	/* make the writes visible to the gpu before the context is used */
	bar->flush(bar);
	return 0;
}
110
/* Context class: custom ctor, stock mpeg context handlers otherwise. */
static struct nouveau_oclass
nv50_mpeg_cclass = {
	.handle = NV_ENGCTX(MPEG, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_mpeg_context_ctor,
		.dtor = _nouveau_mpeg_context_dtor,
		.init = _nouveau_mpeg_context_init,
		.fini = _nouveau_mpeg_context_fini,
		.rd32 = _nouveau_mpeg_context_rd32,
		.wr32 = _nouveau_mpeg_context_wr32,
	},
};
123
124/*******************************************************************************
125 * PMPEG engine/subdev functions
126 ******************************************************************************/
127
/* Flush PMPEG's VM TLB (engine id 0x08 on NV50).  Shared with nv84. */
int
nv50_mpeg_tlb_flush(struct nouveau_engine *engine)
{
	nv50_vm_flush_engine(&engine->base, 0x08);
	return 0;
}
134
/* NV50 PMPEG interrupt handler: silence the expected object-binding
 * interrupt, log anything else, ack, and poke the fb trap handler.
 * Shared with nv84. */
void
nv50_mpeg_intr(struct nouveau_subdev *subdev)
{
	struct nv50_mpeg_priv *priv = (void *)subdev;
	u32 stat = nv_rd32(priv, 0x00b100);
	u32 type = nv_rd32(priv, 0x00b230);
	u32 mthd = nv_rd32(priv, 0x00b234);
	u32 data = nv_rd32(priv, 0x00b238);
	u32 show = stat;	/* bits still unexplained after handling */

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			nv_wr32(priv, 0x00b308, 0x00000100);
			show &= ~0x01000000;
		}
	}

	if (show) {
		nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			stat, type, mthd, data);
	}

	/* ack and re-arm method decoding */
	nv_wr32(priv, 0x00b100, stat);
	nv_wr32(priv, 0x00b230, 0x00000001);
	nv50_fb_trap(nouveau_fb(priv), 1);
}
162
163static void
164nv50_vpe_intr(struct nouveau_subdev *subdev)
165{
166 struct nv50_mpeg_priv *priv = (void *)subdev;
167
168 if (nv_rd32(priv, 0x00b100))
169 nv50_mpeg_intr(subdev);
170
171 if (nv_rd32(priv, 0x00b800)) {
172 u32 stat = nv_rd32(priv, 0x00b800);
173 nv_info(priv, "PMSRCH: 0x%08x\n", stat);
174 nv_wr32(priv, 0xb800, stat);
175 }
176}
177
178static int
179nv50_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
180 struct nouveau_oclass *oclass, void *data, u32 size,
181 struct nouveau_object **pobject)
182{
183 struct nv50_mpeg_priv *priv;
184 int ret;
185
186 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
187 *pobject = nv_object(priv);
188 if (ret)
189 return ret;
190
191 nv_subdev(priv)->unit = 0x00400002;
192 nv_subdev(priv)->intr = nv50_vpe_intr;
193 nv_engine(priv)->cclass = &nv50_mpeg_cclass;
194 nv_engine(priv)->sclass = nv50_mpeg_sclass;
195 nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
196 return 0;
197}
198
/* Bring NV50 PMPEG out of reset, program engine config and wait for idle.
 * Shared with nv84. */
int
nv50_mpeg_init(struct nouveau_object *object)
{
	struct nv50_mpeg_priv *priv = (void *)object;
	int ret;

	ret = nouveau_mpeg_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x00b32c, 0x00000000);
	nv_wr32(priv, 0x00b314, 0x00000100);
	nv_wr32(priv, 0x00b0e0, 0x0000001a);

	nv_wr32(priv, 0x00b220, 0x00000044);
	/* matches the context word written at offset 0x70 by the ctor */
	nv_wr32(priv, 0x00b300, 0x00801ec1);
	nv_wr32(priv, 0x00b390, 0x00000000);
	nv_wr32(priv, 0x00b394, 0x00000000);
	nv_wr32(priv, 0x00b398, 0x00000000);
	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);

	/* ack stale interrupts, unmask everything */
	nv_wr32(priv, 0x00b100, 0xffffffff);
	nv_wr32(priv, 0x00b140, 0xffffffff);

	/* wait for the engine to report idle */
	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
		return -EBUSY;
	}

	return 0;
}
230
/* Engine class descriptor for NV50 PMPEG. */
struct nouveau_oclass
nv50_mpeg_oclass = {
	.handle = NV_ENGINE(MPEG, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_mpeg_ctor,
		.dtor = _nouveau_mpeg_dtor,
		.init = nv50_mpeg_init,
		.fini = _nouveau_mpeg_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
new file mode 100644
index 000000000000..8f805b44d59e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <subdev/vm.h>
30#include <subdev/bar.h>
31#include <subdev/timer.h>
32
33#include <engine/mpeg.h>
34
/* Engine-private state for NV84-generation PMPEG. */
struct nv84_mpeg_priv {
	struct nouveau_mpeg base;
};

/* Per-channel context; lifecycle handled entirely by the nv50 helpers. */
struct nv84_mpeg_chan {
	struct nouveau_mpeg_chan base;
};
42
43/*******************************************************************************
44 * MPEG object classes
45 ******************************************************************************/
46
/* NV84 exposes class 0x8274, implemented by the shared nv50 object funcs. */
static struct nouveau_oclass
nv84_mpeg_sclass[] = {
	{ 0x8274, &nv50_mpeg_ofuncs },
	{}
};

/*******************************************************************************
 * PMPEG context
 ******************************************************************************/

/* Context class: identical to nv50's except for the NV84 handle. */
static struct nouveau_oclass
nv84_mpeg_cclass = {
	.handle = NV_ENGCTX(MPEG, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_mpeg_context_ctor,
		.dtor = _nouveau_mpeg_context_dtor,
		.init = _nouveau_mpeg_context_init,
		.fini = _nouveau_mpeg_context_fini,
		.rd32 = _nouveau_mpeg_context_rd32,
		.wr32 = _nouveau_mpeg_context_wr32,
	},
};
69
70/*******************************************************************************
71 * PMPEG engine/subdev functions
72 ******************************************************************************/
73
74static int
75nv84_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
76 struct nouveau_oclass *oclass, void *data, u32 size,
77 struct nouveau_object **pobject)
78{
79 struct nv84_mpeg_priv *priv;
80 int ret;
81
82 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
83 *pobject = nv_object(priv);
84 if (ret)
85 return ret;
86
87 nv_subdev(priv)->unit = 0x00000002;
88 nv_subdev(priv)->intr = nv50_mpeg_intr;
89 nv_engine(priv)->cclass = &nv84_mpeg_cclass;
90 nv_engine(priv)->sclass = nv84_mpeg_sclass;
91 nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
92 return 0;
93}
94
/* Engine class descriptor for NV84 PMPEG; init is shared with nv50. */
struct nouveau_oclass
nv84_mpeg_oclass = {
	.handle = NV_ENGINE(MPEG, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_mpeg_ctor,
		.dtor = _nouveau_mpeg_dtor,
		.init = nv50_mpeg_init,
		.fini = _nouveau_mpeg_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
new file mode 100644
index 000000000000..50e7e0da1981
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/ppp.h>
30
/* Engine-private state for NV98 PPP (video post-processor). */
struct nv98_ppp_priv {
	struct nouveau_ppp base;
};

/* Per-channel context. */
struct nv98_ppp_chan {
	struct nouveau_ppp_chan base;
};

/*******************************************************************************
 * PPP object classes
 ******************************************************************************/

/* No object classes exposed yet — skeleton implementation. */
static struct nouveau_oclass
nv98_ppp_sclass[] = {
	{},
};
47
48/*******************************************************************************
49 * PPPP context
50 ******************************************************************************/
51
52static int
53nv98_ppp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv98_ppp_chan *priv;
59 int ret;
60
61 ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv98_ppp_context_dtor(struct nouveau_object *object)
72{
73 struct nv98_ppp_chan *priv = (void *)object;
74 nouveau_ppp_context_destroy(&priv->base);
75}
76
77static int
78nv98_ppp_context_init(struct nouveau_object *object)
79{
80 struct nv98_ppp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_ppp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv98_ppp_chan *priv = (void *)object;
94 return nouveau_ppp_context_fini(&priv->base, suspend);
95}
96
/* Context class: thin wrappers over the common ppp context handlers. */
static struct nouveau_oclass
nv98_ppp_cclass = {
	.handle = NV_ENGCTX(PPP, 0x98),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv98_ppp_context_ctor,
		.dtor = nv98_ppp_context_dtor,
		.init = nv98_ppp_context_init,
		.fini = nv98_ppp_context_fini,
		.rd32 = _nouveau_ppp_context_rd32,
		.wr32 = _nouveau_ppp_context_wr32,
	},
};
109
110/*******************************************************************************
111 * PPPP engine/subdev functions
112 ******************************************************************************/
113
/* Interrupt stub: PPP interrupts are not handled yet (and are therefore
 * never acked here — NOTE(review): confirm the engine stays quiet without
 * an ack path). */
static void
nv98_ppp_intr(struct nouveau_subdev *subdev)
{
}
118
119static int
120nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size,
122 struct nouveau_object **pobject)
123{
124 struct nv98_ppp_priv *priv;
125 int ret;
126
127 ret = nouveau_ppp_create(parent, engine, oclass, &priv);
128 *pobject = nv_object(priv);
129 if (ret)
130 return ret;
131
132 nv_subdev(priv)->unit = 0x00400002;
133 nv_subdev(priv)->intr = nv98_ppp_intr;
134 nv_engine(priv)->cclass = &nv98_ppp_cclass;
135 nv_engine(priv)->sclass = nv98_ppp_sclass;
136 return 0;
137}
138
139static void
140nv98_ppp_dtor(struct nouveau_object *object)
141{
142 struct nv98_ppp_priv *priv = (void *)object;
143 nouveau_ppp_destroy(&priv->base);
144}
145
146static int
147nv98_ppp_init(struct nouveau_object *object)
148{
149 struct nv98_ppp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_ppp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv98_ppp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv98_ppp_priv *priv = (void *)object;
163 return nouveau_ppp_fini(&priv->base, suspend);
164}
165
/* Engine class descriptor for NV98 PPP. */
struct nouveau_oclass
nv98_ppp_oclass = {
	.handle = NV_ENGINE(PPP, 0x98),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv98_ppp_ctor,
		.dtor = nv98_ppp_dtor,
		.init = nv98_ppp_init,
		.fini = nv98_ppp_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
new file mode 100644
index 000000000000..3ca4c3aa90b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -0,0 +1,147 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/software.h>
30#include <engine/fifo.h>
31
/* Per-engine state for the NV04 software engine (no extra fields yet). */
struct nv04_software_priv {
	struct nouveau_software base;
};

/* Per-channel software context (no extra fields yet). */
struct nv04_software_chan {
	struct nouveau_software_chan base;
};
39
40/*******************************************************************************
41 * software object classes
42 ******************************************************************************/
43
/* NV04_SW method 0x0150 (SET_REF): store the 32-bit reference value from
 * the method argument into the owning fifo channel's refcnt.  The method
 * object's parent is the engine context, whose parent is the fifo channel.
 * NOTE(review): size is not validated before dereferencing data — the
 * method dispatch presumably guarantees at least 4 bytes; confirm. */
static int
nv04_software_set_ref(struct nouveau_object *object, u32 mthd,
		      void *data, u32 size)
{
	struct nouveau_object *channel = (void *)nv_engctx(object->parent);
	struct nouveau_fifo_chan *fifo = (void *)channel->parent;
	atomic_set(&fifo->refcnt, *(u32*)data);
	return 0;
}
53
54static int
55nv04_software_flip(struct nouveau_object *object, u32 mthd,
56 void *args, u32 size)
57{
58 struct nv04_software_chan *chan = (void *)nv_engctx(object->parent);
59 if (chan->base.flip)
60 return chan->base.flip(chan->base.flip_data);
61 return -EINVAL;
62}
63
/* Method table for the 0x006e software class: SET_REF and FLIP. */
static struct nouveau_omthds
nv04_software_omthds[] = {
	{ 0x0150, nv04_software_set_ref },
	{ 0x0500, nv04_software_flip },
	{}
};

/* Object classes exposed by the NV04 software engine. */
static struct nouveau_oclass
nv04_software_sclass[] = {
	{ 0x006e, &nouveau_object_ofuncs, nv04_software_omthds },
	{}
};
76
77/*******************************************************************************
78 * software context
79 ******************************************************************************/
80
81static int
82nv04_software_context_ctor(struct nouveau_object *parent,
83 struct nouveau_object *engine,
84 struct nouveau_oclass *oclass, void *data, u32 size,
85 struct nouveau_object **pobject)
86{
87 struct nv04_software_chan *chan;
88 int ret;
89
90 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
91 *pobject = nv_object(chan);
92 if (ret)
93 return ret;
94
95 return 0;
96}
97
/* Context class for the NV04 software engine; generic dtor/init/fini. */
static struct nouveau_oclass
nv04_software_cclass = {
	.handle = NV_ENGCTX(SW, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_software_context_ctor,
		.dtor = _nouveau_software_context_dtor,
		.init = _nouveau_software_context_init,
		.fini = _nouveau_software_context_fini,
	},
};
108
109/*******************************************************************************
110 * software engine/subdev functions
111 ******************************************************************************/
112
/* Software-engine interrupt handler, shared by nv04/nv10/nv50/nvc0.
 * NOTE(review): clears bit 31 of register 0x000100 — presumably the
 * software-engine line in the master interrupt register; confirm against
 * hardware documentation.  No event is dispatched. */
void
nv04_software_intr(struct nouveau_subdev *subdev)
{
	nv_mask(subdev, 0x000100, 0x80000000, 0x00000000);
}
118
119static int
120nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size,
122 struct nouveau_object **pobject)
123{
124 struct nv04_software_priv *priv;
125 int ret;
126
127 ret = nouveau_software_create(parent, engine, oclass, &priv);
128 *pobject = nv_object(priv);
129 if (ret)
130 return ret;
131
132 nv_engine(priv)->cclass = &nv04_software_cclass;
133 nv_engine(priv)->sclass = nv04_software_sclass;
134 nv_subdev(priv)->intr = nv04_software_intr;
135 return 0;
136}
137
/* Engine class descriptor for the NV04 software engine. */
struct nouveau_oclass
nv04_software_oclass = {
	.handle = NV_ENGINE(SW, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_software_ctor,
		.dtor = _nouveau_software_dtor,
		.init = _nouveau_software_init,
		.fini = _nouveau_software_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
new file mode 100644
index 000000000000..6e699afbfdb7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/software.h>
30
/* Per-engine state for the NV10 software engine (no extra fields yet). */
struct nv10_software_priv {
	struct nouveau_software base;
};

/* Per-channel software context (no extra fields yet). */
struct nv10_software_chan {
	struct nouveau_software_chan base;
};
38
39/*******************************************************************************
40 * software object classes
41 ******************************************************************************/
42
43static int
44nv10_software_flip(struct nouveau_object *object, u32 mthd,
45 void *args, u32 size)
46{
47 struct nv10_software_chan *chan = (void *)nv_engctx(object->parent);
48 if (chan->base.flip)
49 return chan->base.flip(chan->base.flip_data);
50 return -EINVAL;
51}
52
/* Method table for the 0x016e software class: FLIP only. */
static struct nouveau_omthds
nv10_software_omthds[] = {
	{ 0x0500, nv10_software_flip },
	{}
};

/* Object classes exposed by the NV10 software engine. */
static struct nouveau_oclass
nv10_software_sclass[] = {
	{ 0x016e, &nouveau_object_ofuncs, nv10_software_omthds },
	{}
};
64
65/*******************************************************************************
66 * software context
67 ******************************************************************************/
68
69static int
70nv10_software_context_ctor(struct nouveau_object *parent,
71 struct nouveau_object *engine,
72 struct nouveau_oclass *oclass, void *data, u32 size,
73 struct nouveau_object **pobject)
74{
75 struct nv10_software_chan *chan;
76 int ret;
77
78 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
79 *pobject = nv_object(chan);
80 if (ret)
81 return ret;
82
83 return 0;
84}
85
86static struct nouveau_oclass
87nv10_software_cclass = {
88 .handle = NV_ENGCTX(SW, 0x04),
89 .ofuncs = &(struct nouveau_ofuncs) {
90 .ctor = nv10_software_context_ctor,
91 .dtor = _nouveau_software_context_dtor,
92 .init = _nouveau_software_context_init,
93 .fini = _nouveau_software_context_fini,
94 },
95};
96
97/*******************************************************************************
98 * software engine/subdev functions
99 ******************************************************************************/
100
101static int
102nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
103 struct nouveau_oclass *oclass, void *data, u32 size,
104 struct nouveau_object **pobject)
105{
106 struct nv10_software_priv *priv;
107 int ret;
108
109 ret = nouveau_software_create(parent, engine, oclass, &priv);
110 *pobject = nv_object(priv);
111 if (ret)
112 return ret;
113
114 nv_engine(priv)->cclass = &nv10_software_cclass;
115 nv_engine(priv)->sclass = nv10_software_sclass;
116 nv_subdev(priv)->intr = nv04_software_intr;
117 return 0;
118}
119
/* Engine class descriptor for the NV10 software engine. */
struct nouveau_oclass
nv10_software_oclass = {
	.handle = NV_ENGINE(SW, 0x10),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv10_software_ctor,
		.dtor = _nouveau_software_dtor,
		.init = _nouveau_software_init,
		.fini = _nouveau_software_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
new file mode 100644
index 000000000000..a2edcd38544a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/namedb.h>
29#include <core/handle.h>
30#include <core/gpuobj.h>
31
32#include <engine/software.h>
33#include <engine/disp.h>
34
/* Per-engine state for the NV50 software engine (no extra fields yet). */
struct nv50_software_priv {
	struct nouveau_software base;
};

/* Per-channel software context; vblank state lives in the base struct. */
struct nv50_software_chan {
	struct nouveau_software_chan base;
};
42
43/*******************************************************************************
44 * software object classes
45 ******************************************************************************/
46
/* NV50_SW method 0x018c: bind the DMA object whose handle is passed in
 * *args as the vblank-semaphore ctxdma.  The handle is looked up in the
 * namedb of the owning fifo channel (the engctx's parent); only GPU
 * objects are accepted, and their instance offset is recorded in 16-byte
 * units.  Returns -ENOENT for an unknown handle, -EINVAL for a non-gpuobj. */
static int
nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
			      void *args, u32 size)
{
	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
	struct nouveau_fifo_chan *fifo = (void *)nv_object(chan)->parent;
	struct nouveau_handle *handle;
	int ret = -EINVAL;

	handle = nouveau_namedb_get(nv_namedb(fifo), *(u32 *)args);
	if (!handle)
		return -ENOENT;

	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
		struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
		chan->base.vblank.ctxdma = gpuobj->node->offset >> 4; /* 16B units */
		ret = 0;
	}
	nouveau_namedb_put(handle); /* drop the reference taken by _get() */
	return ret;
}
68
69static int
70nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
71 void *args, u32 size)
72{
73 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
74 chan->base.vblank.offset = *(u32 *)args;
75 return 0;
76}
77
78static int
79nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
80 void *args, u32 size)
81{
82 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
83 chan->base.vblank.value = *(u32 *)args;
84 return 0;
85}
86
/* NV50_SW method 0x0408: arm a vblank-semaphore release on the given CRTC.
 * Takes a vblank interrupt reference from the display engine, then queues
 * this context on the global vblank list under the vblank spinlock; the
 * interrupt handler performs the actual semaphore write.
 * NOTE(review): no guard against arming twice (double list_add) — assumed
 * to be prevented by the client; confirm. */
static int
nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
				  void *args, u32 size)
{
	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
	struct nouveau_disp *disp = nouveau_disp(object);
	unsigned long flags;
	u32 crtc = *(u32 *)args;

	/* nv50-class boards drive at most two CRTCs */
	if (crtc > 1)
		return -EINVAL;

	disp->vblank.get(disp->vblank.data, crtc);

	spin_lock_irqsave(&disp->vblank.lock, flags);
	list_add(&chan->base.vblank.head, &disp->vblank.list);
	chan->base.vblank.crtc = crtc;
	spin_unlock_irqrestore(&disp->vblank.lock, flags);
	return 0;
}
107
108static int
109nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
110 void *args, u32 size)
111{
112 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
113 if (chan->base.flip)
114 return chan->base.flip(chan->base.flip_data);
115 return -EINVAL;
116}
117
/* Method table for the 0x506e software class: vblank semaphore setup
 * (ctxdma, offset, value, release) plus FLIP. */
static struct nouveau_omthds
nv50_software_omthds[] = {
	{ 0x018c, nv50_software_mthd_dma_vblsem },
	{ 0x0400, nv50_software_mthd_vblsem_offset },
	{ 0x0404, nv50_software_mthd_vblsem_value },
	{ 0x0408, nv50_software_mthd_vblsem_release },
	{ 0x0500, nv50_software_mthd_flip },
	{}
};

/* Object classes exposed by the NV50 software engine. */
static struct nouveau_oclass
nv50_software_sclass[] = {
	{ 0x506e, &nouveau_object_ofuncs, nv50_software_omthds },
	{}
};
133
134/*******************************************************************************
135 * software context
136 ******************************************************************************/
137
/* Construct a per-channel NV50 software context and record the channel's
 * instance address (in 4KiB units) for the vblank interrupt handler.
 * NOTE(review): parent->parent is assumed to be the fifo channel's gpuobj;
 * confirm against the object hierarchy set up by the fifo code. */
static int
nv50_software_context_ctor(struct nouveau_object *parent,
			   struct nouveau_object *engine,
			   struct nouveau_oclass *oclass, void *data, u32 size,
			   struct nouveau_object **pobject)
{
	struct nv50_software_chan *chan;
	int ret;

	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
	return 0;
}
155
/* Context class for the NV50 software engine; generic dtor/init/fini. */
static struct nouveau_oclass
nv50_software_cclass = {
	.handle = NV_ENGCTX(SW, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_software_context_ctor,
		.dtor = _nouveau_software_context_dtor,
		.init = _nouveau_software_context_init,
		.fini = _nouveau_software_context_fini,
	},
};
166
167/*******************************************************************************
168 * software engine/subdev functions
169 ******************************************************************************/
170
171static int
172nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
173 struct nouveau_oclass *oclass, void *data, u32 size,
174 struct nouveau_object **pobject)
175{
176 struct nv50_software_priv *priv;
177 int ret;
178
179 ret = nouveau_software_create(parent, engine, oclass, &priv);
180 *pobject = nv_object(priv);
181 if (ret)
182 return ret;
183
184 nv_engine(priv)->cclass = &nv50_software_cclass;
185 nv_engine(priv)->sclass = nv50_software_sclass;
186 nv_subdev(priv)->intr = nv04_software_intr;
187 return 0;
188}
189
/* Engine class descriptor for the NV50 software engine. */
struct nouveau_oclass
nv50_software_oclass = {
	.handle = NV_ENGINE(SW, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_software_ctor,
		.dtor = _nouveau_software_dtor,
		.init = _nouveau_software_init,
		.fini = _nouveau_software_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
new file mode 100644
index 000000000000..b7b0d7e330d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/software.h>
30#include <engine/disp.h>
31
/* Per-engine state for the NVC0 software engine (no extra fields yet). */
struct nvc0_software_priv {
	struct nouveau_software base;
};

/* Per-channel software context; vblank state lives in the base struct. */
struct nvc0_software_chan {
	struct nouveau_software_chan base;
};
39
40/*******************************************************************************
41 * software object classes
42 ******************************************************************************/
43
/* NVC0_SW methods 0x0400/0x0404: set the 40-bit vblank-semaphore offset.
 * 0x0400 supplies the high bits (shifted into bits 32+), 0x0404 the low
 * 32 bits; each write preserves the other half of the stored value. */
static int
nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
				 void *args, u32 size)
{
	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
	u64 data = *(u32 *)args;
	if (mthd == 0x0400) {
		chan->base.vblank.offset &= 0x00ffffffffULL; /* keep low 32 */
		chan->base.vblank.offset |= data << 32;
	} else {
		chan->base.vblank.offset &= 0xff00000000ULL; /* keep high bits */
		chan->base.vblank.offset |= data;
	}
	return 0;
}
59
60static int
61nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
62 void *args, u32 size)
63{
64 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
65 chan->base.vblank.value = *(u32 *)args;
66 return 0;
67}
68
/* NVC0_SW method 0x040c: arm a vblank-semaphore release on the given CRTC.
 * Takes a vblank interrupt reference from the display engine, then queues
 * this context on the global vblank list under the vblank spinlock. */
static int
nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
				  void *args, u32 size)
{
	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
	struct nouveau_disp *disp = nouveau_disp(object);
	unsigned long flags;
	u32 crtc = *(u32 *)args;

	/* pre-NVE0 boards drive at most two CRTCs, NVE0+ at most four */
	if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
		return -EINVAL;

	disp->vblank.get(disp->vblank.data, crtc);

	spin_lock_irqsave(&disp->vblank.lock, flags);
	list_add(&chan->base.vblank.head, &disp->vblank.list);
	chan->base.vblank.crtc = crtc;
	spin_unlock_irqrestore(&disp->vblank.lock, flags);
	return 0;
}
89
90static int
91nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
92 void *args, u32 size)
93{
94 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
95 if (chan->base.flip)
96 return chan->base.flip(chan->base.flip_data);
97 return -EINVAL;
98}
99
/* Method table for the 0x906e software class: 40-bit vblank semaphore
 * setup (offset hi/lo, value, release) plus FLIP. */
static struct nouveau_omthds
nvc0_software_omthds[] = {
	{ 0x0400, nvc0_software_mthd_vblsem_offset },
	{ 0x0404, nvc0_software_mthd_vblsem_offset },
	{ 0x0408, nvc0_software_mthd_vblsem_value },
	{ 0x040c, nvc0_software_mthd_vblsem_release },
	{ 0x0500, nvc0_software_mthd_flip },
	{}
};

/* Object classes exposed by the NVC0 software engine. */
static struct nouveau_oclass
nvc0_software_sclass[] = {
	{ 0x906e, &nouveau_object_ofuncs, nvc0_software_omthds },
	{}
};
115
116/*******************************************************************************
117 * software context
118 ******************************************************************************/
119
/* Construct a per-channel NVC0 software context and record the channel's
 * instance address (in 4KiB units) for the vblank interrupt handler.
 * NOTE(review): parent->parent is assumed to be the fifo channel's gpuobj;
 * confirm against the object hierarchy set up by the fifo code. */
static int
nvc0_software_context_ctor(struct nouveau_object *parent,
			   struct nouveau_object *engine,
			   struct nouveau_oclass *oclass, void *data, u32 size,
			   struct nouveau_object **pobject)
{
	struct nvc0_software_chan *chan;
	int ret;

	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
	return 0;
}
137
/* Context class for the NVC0 software engine; generic dtor/init/fini. */
static struct nouveau_oclass
nvc0_software_cclass = {
	.handle = NV_ENGCTX(SW, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_software_context_ctor,
		.dtor = _nouveau_software_context_dtor,
		.init = _nouveau_software_context_init,
		.fini = _nouveau_software_context_fini,
	},
};
148
149/*******************************************************************************
150 * software engine/subdev functions
151 ******************************************************************************/
152
153static int
154nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
155 struct nouveau_oclass *oclass, void *data, u32 size,
156 struct nouveau_object **pobject)
157{
158 struct nvc0_software_priv *priv;
159 int ret;
160
161 ret = nouveau_software_create(parent, engine, oclass, &priv);
162 *pobject = nv_object(priv);
163 if (ret)
164 return ret;
165
166 nv_engine(priv)->cclass = &nvc0_software_cclass;
167 nv_engine(priv)->sclass = nvc0_software_sclass;
168 nv_subdev(priv)->intr = nv04_software_intr;
169 return 0;
170}
171
/* Engine class descriptor for the NVC0 software engine. */
struct nouveau_oclass
nvc0_software_oclass = {
	.handle = NV_ENGINE(SW, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_software_ctor,
		.dtor = _nouveau_software_dtor,
		.init = _nouveau_software_init,
		.fini = _nouveau_software_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
new file mode 100644
index 000000000000..dd23c80e5405
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/vp.h>
30
/* Per-engine state for the NV84 VP (video processor) engine. */
struct nv84_vp_priv {
	struct nouveau_vp base;
};

/* Per-channel VP engine context (no extra fields yet). */
struct nv84_vp_chan {
	struct nouveau_vp_chan base;
};
38
39/*******************************************************************************
40 * VP object classes
41 ******************************************************************************/
42
/* No VP object classes are exposed yet; the list is empty. */
static struct nouveau_oclass
nv84_vp_sclass[] = {
	{},
};
47
48/*******************************************************************************
49 * PVP context
50 ******************************************************************************/
51
52static int
53nv84_vp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv84_vp_chan *priv;
59 int ret;
60
61 ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv84_vp_context_dtor(struct nouveau_object *object)
72{
73 struct nv84_vp_chan *priv = (void *)object;
74 nouveau_vp_context_destroy(&priv->base);
75}
76
77static int
78nv84_vp_context_init(struct nouveau_object *object)
79{
80 struct nv84_vp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_vp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv84_vp_chan *priv = (void *)object;
94 return nouveau_vp_context_fini(&priv->base, suspend);
95}
96
/* Context class for the NV84 VP engine, including register accessors. */
static struct nouveau_oclass
nv84_vp_cclass = {
	.handle = NV_ENGCTX(VP, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_vp_context_ctor,
		.dtor = nv84_vp_context_dtor,
		.init = nv84_vp_context_init,
		.fini = nv84_vp_context_fini,
		.rd32 = _nouveau_vp_context_rd32,
		.wr32 = _nouveau_vp_context_wr32,
	},
};
109
110/*******************************************************************************
111 * PVP engine/subdev functions
112 ******************************************************************************/
113
/* Interrupt handler stub for the NV84 VP engine.  The unit mask set in
 * nv84_vp_ctor routes PVP interrupts here, but no acknowledgement or
 * dispatch is performed yet.  TODO(review): confirm real handling is
 * intended to follow. */
static void
nv84_vp_intr(struct nouveau_subdev *subdev)
{
}
118
119static int
120nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size,
122 struct nouveau_object **pobject)
123{
124 struct nv84_vp_priv *priv;
125 int ret;
126
127 ret = nouveau_vp_create(parent, engine, oclass, &priv);
128 *pobject = nv_object(priv);
129 if (ret)
130 return ret;
131
132 nv_subdev(priv)->unit = 0x01020000;
133 nv_subdev(priv)->intr = nv84_vp_intr;
134 nv_engine(priv)->cclass = &nv84_vp_cclass;
135 nv_engine(priv)->sclass = nv84_vp_sclass;
136 return 0;
137}
138
139static void
140nv84_vp_dtor(struct nouveau_object *object)
141{
142 struct nv84_vp_priv *priv = (void *)object;
143 nouveau_vp_destroy(&priv->base);
144}
145
146static int
147nv84_vp_init(struct nouveau_object *object)
148{
149 struct nv84_vp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_vp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv84_vp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv84_vp_priv *priv = (void *)object;
163 return nouveau_vp_fini(&priv->base, suspend);
164}
165
/* Engine class descriptor for the NV84 VP engine. */
struct nouveau_oclass
nv84_vp_oclass = {
	.handle = NV_ENGINE(VP, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_vp_ctor,
		.dtor = nv84_vp_dtor,
		.init = nv84_vp_init,
		.fini = nv84_vp_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
new file mode 100644
index 000000000000..6180ae9800fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -0,0 +1,118 @@
#ifndef __NOUVEAU_CLASS_H__
#define __NOUVEAU_CLASS_H__

/* Class identifiers and argument structures for the nouveau core object
 * interface.  These are ABI between the core and its clients. */

/* Device class
 *
 * 0080: NV_DEVICE
 */
#define NV_DEVICE_CLASS 0x00000080

/* Bits for nv_device_class.disable: selectively disable subsystems. */
#define NV_DEVICE_DISABLE_IDENTIFY 0x0000000000000001ULL
#define NV_DEVICE_DISABLE_MMIO 0x0000000000000002ULL
#define NV_DEVICE_DISABLE_VBIOS 0x0000000000000004ULL
#define NV_DEVICE_DISABLE_CORE 0x0000000000000008ULL
#define NV_DEVICE_DISABLE_DISP 0x0000000000010000ULL
#define NV_DEVICE_DISABLE_FIFO 0x0000000000020000ULL
#define NV_DEVICE_DISABLE_GRAPH 0x0000000100000000ULL
#define NV_DEVICE_DISABLE_MPEG 0x0000000200000000ULL
#define NV_DEVICE_DISABLE_ME 0x0000000400000000ULL
#define NV_DEVICE_DISABLE_VP 0x0000000800000000ULL
#define NV_DEVICE_DISABLE_CRYPT 0x0000001000000000ULL
#define NV_DEVICE_DISABLE_BSP 0x0000002000000000ULL
#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL
#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL

/* Arguments for creating an NV_DEVICE object. */
struct nv_device_class {
	u64 device;	/* device identifier, ~0 for client default */
	u64 disable;	/* disable particular subsystems */
	u64 debug0;	/* as above, but *internal* ids, and *NOT* ABI */
};

/* DMA object classes
 *
 * 0002: NV_DMA_FROM_MEMORY
 * 0003: NV_DMA_TO_MEMORY
 * 003d: NV_DMA_IN_MEMORY
 */
#define NV_DMA_FROM_MEMORY_CLASS 0x00000002
#define NV_DMA_TO_MEMORY_CLASS 0x00000003
#define NV_DMA_IN_MEMORY_CLASS 0x0000003d

/* Fields of nv_dma_class.flags: target memory space and access mode. */
#define NV_DMA_TARGET_MASK 0x000000ff
#define NV_DMA_TARGET_VM 0x00000000
#define NV_DMA_TARGET_VRAM 0x00000001
#define NV_DMA_TARGET_PCI 0x00000002
#define NV_DMA_TARGET_PCI_US 0x00000003
#define NV_DMA_TARGET_AGP 0x00000004
#define NV_DMA_ACCESS_MASK 0x00000f00
#define NV_DMA_ACCESS_VM 0x00000000
#define NV_DMA_ACCESS_RD 0x00000100
#define NV_DMA_ACCESS_WR 0x00000200
#define NV_DMA_ACCESS_RDWR 0x00000300

/* Arguments for creating a DMA object: flags plus the address window. */
struct nv_dma_class {
	u32 flags;	/* NV_DMA_TARGET_* | NV_DMA_ACCESS_* */
	u32 pad0;	/* explicit padding for 64-bit alignment */
	u64 start;
	u64 limit;
};

/* DMA FIFO channel classes
 *
 * 006b: NV03_CHANNEL_DMA
 * 006e: NV10_CHANNEL_DMA
 * 176e: NV17_CHANNEL_DMA
 * 406e: NV40_CHANNEL_DMA
 * 506e: NV50_CHANNEL_DMA
 * 826e: NV84_CHANNEL_DMA
 */
#define NV03_CHANNEL_DMA_CLASS 0x0000006b
#define NV10_CHANNEL_DMA_CLASS 0x0000006e
#define NV17_CHANNEL_DMA_CLASS 0x0000176e
#define NV40_CHANNEL_DMA_CLASS 0x0000406e
#define NV50_CHANNEL_DMA_CLASS 0x0000506e
#define NV84_CHANNEL_DMA_CLASS 0x0000826e

/* Arguments for creating a DMA-mode fifo channel. */
struct nv03_channel_dma_class {
	u32 pushbuf;	/* handle of the pushbuffer DMA object */
	u32 pad0;
	u64 offset;
};

/* Indirect FIFO channel classes
 *
 * 506f: NV50_CHANNEL_IND
 * 826f: NV84_CHANNEL_IND
 * 906f: NVC0_CHANNEL_IND
 * a06f: NVE0_CHANNEL_IND
 */

#define NV50_CHANNEL_IND_CLASS 0x0000506f
#define NV84_CHANNEL_IND_CLASS 0x0000826f
#define NVC0_CHANNEL_IND_CLASS 0x0000906f
#define NVE0_CHANNEL_IND_CLASS 0x0000a06f

/* Arguments for creating an indirect-mode (ib) fifo channel. */
struct nv50_channel_ind_class {
	u32 pushbuf;	/* handle of the pushbuffer DMA object */
	u32 ilength;	/* indirect buffer length */
	u64 ioffset;	/* indirect buffer offset */
};

/* Bits for nve0_channel_ind_class.engine: engines the channel targets. */
#define NVE0_CHANNEL_IND_ENGINE_GR 0x00000001
#define NVE0_CHANNEL_IND_ENGINE_VP 0x00000002
#define NVE0_CHANNEL_IND_ENGINE_PPP 0x00000004
#define NVE0_CHANNEL_IND_ENGINE_BSP 0x00000008
#define NVE0_CHANNEL_IND_ENGINE_CE0 0x00000010
#define NVE0_CHANNEL_IND_ENGINE_CE1 0x00000020
#define NVE0_CHANNEL_IND_ENGINE_ENC 0x00000040

/* NVE0 variant additionally selects the target engine(s). */
struct nve0_channel_ind_class {
	u32 pushbuf;
	u32 ilength;
	u64 ioffset;
	u32 engine;	/* NVE0_CHANNEL_IND_ENGINE_* mask */
};

#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
new file mode 100644
index 000000000000..0193532ceac9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -0,0 +1,42 @@
#ifndef __NOUVEAU_CLIENT_H__
#define __NOUVEAU_CLIENT_H__

#include <core/namedb.h>

/* Per-client state: a namedb of objects the client owns, its root handle,
 * the device it is bound to, a debug level and an (optional) VM. */
struct nouveau_client {
	struct nouveau_namedb base;
	struct nouveau_handle *root;
	struct nouveau_object *device;
	char name[16];	/* client name (fixed 16-byte buffer) */
	u32 debug;
	struct nouveau_vm *vm;
};

/* Downcast an object to a client; class-checked at PARANOIA debug level. */
static inline struct nouveau_client *
nv_client(void *obj)
{
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
	if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
		nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
#endif
	return obj;
}

/* Walk up the object tree to the owning client; NULL if there is none. */
static inline struct nouveau_client *
nouveau_client(void *obj)
{
	struct nouveau_object *client = nv_object(obj);
	while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
		client = client->parent;
	return (void *)client;
}

/* Convenience wrapper: sizes the allocation from the destination pointer. */
#define nouveau_client_create(n,c,oc,od,d) \
	nouveau_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)

int nouveau_client_create_(const char *name, u64 device, const char *cfg,
			    const char *dbg, int, void **);
int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);

#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/debug.h b/drivers/gpu/drm/nouveau/core/include/core/debug.h
new file mode 100644
index 000000000000..9ea18dfcb4d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/debug.h
@@ -0,0 +1,13 @@
1#ifndef __NOUVEAU_DEBUG_H__
2#define __NOUVEAU_DEBUG_H__
3
4#define NV_DBG_FATAL 0
5#define NV_DBG_ERROR 1
6#define NV_DBG_WARN 2
7#define NV_DBG_INFO 3
8#define NV_DBG_DEBUG 4
9#define NV_DBG_TRACE 5
10#define NV_DBG_PARANOIA 6
11#define NV_DBG_SPAM 7
12
13#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
new file mode 100644
index 000000000000..e58b6f0984c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -0,0 +1,136 @@
1#ifndef __NOUVEAU_DEVICE_H__
2#define __NOUVEAU_DEVICE_H__
3
4#include <core/object.h>
5#include <core/subdev.h>
6#include <core/engine.h>
7
8enum nv_subdev_type {
9 NVDEV_SUBDEV_DEVICE,
10 NVDEV_SUBDEV_VBIOS,
11
12 /* All subdevs from DEVINIT to DEVINIT_LAST will be created before
13 * *any* of them are initialised. This subdev category is used
14 * for any subdevs that the VBIOS init table parsing may call out
15 * to during POST.
16 */
17 NVDEV_SUBDEV_DEVINIT,
18 NVDEV_SUBDEV_GPIO,
19 NVDEV_SUBDEV_I2C,
20 NVDEV_SUBDEV_CLOCK,
21 NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_CLOCK,
22
23 /* This grouping of subdevs are initialised right after they've
24 * been created, and are allowed to assume any subdevs in the
25 * list above them exist and have been initialised.
26 */
27 NVDEV_SUBDEV_MXM,
28 NVDEV_SUBDEV_MC,
29 NVDEV_SUBDEV_TIMER,
30 NVDEV_SUBDEV_FB,
31 NVDEV_SUBDEV_LTCG,
32 NVDEV_SUBDEV_IBUS,
33 NVDEV_SUBDEV_INSTMEM,
34 NVDEV_SUBDEV_VM,
35 NVDEV_SUBDEV_BAR,
36 NVDEV_SUBDEV_VOLT,
37 NVDEV_SUBDEV_THERM,
38
39 NVDEV_ENGINE_DMAOBJ,
40 NVDEV_ENGINE_FIFO,
41 NVDEV_ENGINE_SW,
42 NVDEV_ENGINE_GR,
43 NVDEV_ENGINE_MPEG,
44 NVDEV_ENGINE_ME,
45 NVDEV_ENGINE_VP,
46 NVDEV_ENGINE_CRYPT,
47 NVDEV_ENGINE_BSP,
48 NVDEV_ENGINE_PPP,
49 NVDEV_ENGINE_COPY0,
50 NVDEV_ENGINE_COPY1,
51 NVDEV_ENGINE_UNK1C1,
52 NVDEV_ENGINE_VENC,
53 NVDEV_ENGINE_DISP,
54
55 NVDEV_SUBDEV_NR,
56};
57
58struct nouveau_device {
59 struct nouveau_subdev base;
60 struct list_head head;
61
62 struct pci_dev *pdev;
63 u64 handle;
64
65 const char *cfgopt;
66 const char *dbgopt;
67 const char *name;
68 const char *cname;
69
70 enum {
71 NV_04 = 0x04,
72 NV_10 = 0x10,
73 NV_20 = 0x20,
74 NV_30 = 0x30,
75 NV_40 = 0x40,
76 NV_50 = 0x50,
77 NV_C0 = 0xc0,
78 NV_D0 = 0xd0,
79 NV_E0 = 0xe0,
80 } card_type;
81 u32 chipset;
82 u32 crystal;
83
84 struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
85 struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
86};
87
88static inline struct nouveau_device *
89nv_device(void *obj)
90{
91 struct nouveau_object *object = nv_object(obj);
92 struct nouveau_object *device = object;
93
94 if (device->engine)
95 device = device->engine;
96 if (device->parent)
97 device = device->parent;
98
99#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
100 if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) ||
101 (nv_hclass(device) & 0xff) != NVDEV_SUBDEV_DEVICE)) {
102 nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x",
103 nv_hclass(object), nv_hclass(device));
104 }
105#endif
106
107 return (void *)device;
108}
109
110static inline struct nouveau_subdev *
111nouveau_subdev(void *obj, int sub)
112{
113 if (nv_device(obj)->subdev[sub])
114 return nv_subdev(nv_device(obj)->subdev[sub]);
115 return NULL;
116}
117
118static inline struct nouveau_engine *
119nouveau_engine(void *obj, int sub)
120{
121 struct nouveau_subdev *subdev = nouveau_subdev(obj, sub);
122 if (subdev && nv_iclass(subdev, NV_ENGINE_CLASS))
123 return nv_engine(subdev);
124 return NULL;
125}
126
127static inline bool
128nv_device_match(struct nouveau_object *object, u16 dev, u16 ven, u16 sub)
129{
130 struct nouveau_device *device = nv_device(object);
131 return device->pdev->device == dev &&
132 device->pdev->subsystem_vendor == ven &&
133 device->pdev->subsystem_device == sub;
134}
135
136#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
new file mode 100644
index 000000000000..8a947b6872eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -0,0 +1,51 @@
1#ifndef __NOUVEAU_ENGCTX_H__
2#define __NOUVEAU_ENGCTX_H__
3
4#include <core/object.h>
5#include <core/gpuobj.h>
6
7#include <subdev/vm.h>
8
9#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
10#define NV_ENGCTX(name,var) NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
11
12struct nouveau_engctx {
13 struct nouveau_gpuobj base;
14 struct nouveau_vma vma;
15 struct list_head head;
16 unsigned long save;
17 u64 addr;
18};
19
20static inline struct nouveau_engctx *
21nv_engctx(void *obj)
22{
23#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
24 if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
25 nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
26#endif
27 return obj;
28}
29
30#define nouveau_engctx_create(p,e,c,g,s,a,f,d) \
31 nouveau_engctx_create_((p), (e), (c), (g), (s), (a), (f), \
32 sizeof(**d), (void **)d)
33
34int nouveau_engctx_create_(struct nouveau_object *, struct nouveau_object *,
35 struct nouveau_oclass *, struct nouveau_object *,
36 u32 size, u32 align, u32 flags,
37 int length, void **data);
38void nouveau_engctx_destroy(struct nouveau_engctx *);
39int nouveau_engctx_init(struct nouveau_engctx *);
40int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
41
42void _nouveau_engctx_dtor(struct nouveau_object *);
43int _nouveau_engctx_init(struct nouveau_object *);
44int _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
45#define _nouveau_engctx_rd32 _nouveau_gpuobj_rd32
46#define _nouveau_engctx_wr32 _nouveau_gpuobj_wr32
47
48struct nouveau_object *nouveau_engctx_get(struct nouveau_engine *, u64 addr);
49void nouveau_engctx_put(struct nouveau_object *);
50
51#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engine.h b/drivers/gpu/drm/nouveau/core/include/core/engine.h
new file mode 100644
index 000000000000..666d06de77ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/engine.h
@@ -0,0 +1,57 @@
1#ifndef __NOUVEAU_ENGINE_H__
2#define __NOUVEAU_ENGINE_H__
3
4#include <core/object.h>
5#include <core/subdev.h>
6
7#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
8#define NV_ENGINE(name,var) NV_ENGINE_(NVDEV_ENGINE_##name, (var))
9
10struct nouveau_engine {
11 struct nouveau_subdev base;
12 struct nouveau_oclass *cclass;
13 struct nouveau_oclass *sclass;
14
15 struct list_head contexts;
16 spinlock_t lock;
17
18 void (*tile_prog)(struct nouveau_engine *, int region);
19 int (*tlb_flush)(struct nouveau_engine *);
20};
21
22static inline struct nouveau_engine *
23nv_engine(void *obj)
24{
25#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
26 if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS)))
27 nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj));
28#endif
29 return obj;
30}
31
32static inline int
33nv_engidx(struct nouveau_object *object)
34{
35 return nv_subidx(object);
36}
37
38#define nouveau_engine_create(p,e,c,d,i,f,r) \
39 nouveau_engine_create_((p), (e), (c), (d), (i), (f), \
40 sizeof(**r),(void **)r)
41
42#define nouveau_engine_destroy(p) \
43 nouveau_subdev_destroy(&(p)->base)
44#define nouveau_engine_init(p) \
45 nouveau_subdev_init(&(p)->base)
46#define nouveau_engine_fini(p,s) \
47 nouveau_subdev_fini(&(p)->base, (s))
48
49int nouveau_engine_create_(struct nouveau_object *, struct nouveau_object *,
50 struct nouveau_oclass *, bool, const char *,
51 const char *, int, void **);
52
53#define _nouveau_engine_dtor _nouveau_subdev_dtor
54#define _nouveau_engine_init _nouveau_subdev_init
55#define _nouveau_engine_fini _nouveau_subdev_fini
56
57#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/enum.h b/drivers/gpu/drm/nouveau/core/include/core/enum.h
new file mode 100644
index 000000000000..e7b1e181943b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/enum.h
@@ -0,0 +1,23 @@
1#ifndef __NOUVEAU_ENUM_H__
2#define __NOUVEAU_ENUM_H__
3
4struct nouveau_enum {
5 u32 value;
6 const char *name;
7 const void *data;
8};
9
10const struct nouveau_enum *
11nouveau_enum_find(const struct nouveau_enum *, u32 value);
12
13void
14nouveau_enum_print(const struct nouveau_enum *en, u32 value);
15
16struct nouveau_bitfield {
17 u32 mask;
18 const char *name;
19};
20
21void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
22
23#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
new file mode 100644
index 000000000000..6eaff79377ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -0,0 +1,71 @@
1#ifndef __NOUVEAU_GPUOBJ_H__
2#define __NOUVEAU_GPUOBJ_H__
3
4#include <core/object.h>
5#include <core/device.h>
6#include <core/parent.h>
7#include <core/mm.h>
8
9struct nouveau_vma;
10struct nouveau_vm;
11
12#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
13#define NVOBJ_FLAG_ZERO_FREE 0x00000002
14#define NVOBJ_FLAG_HEAP 0x00000004
15
16struct nouveau_gpuobj {
17 struct nouveau_object base;
18 struct nouveau_object *parent;
19 struct nouveau_mm_node *node;
20 struct nouveau_mm heap;
21
22 u32 flags;
23 u64 addr;
24 u32 size;
25};
26
27static inline struct nouveau_gpuobj *
28nv_gpuobj(void *obj)
29{
30#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
31 if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
32 nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
33#endif
34 return obj;
35}
36
37#define nouveau_gpuobj_create(p,e,c,v,g,s,a,f,d) \
38 nouveau_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f), \
39 sizeof(**d), (void **)d)
40#define nouveau_gpuobj_init(p) nouveau_object_init(&(p)->base)
41#define nouveau_gpuobj_fini(p,s) nouveau_object_fini(&(p)->base, (s))
42int nouveau_gpuobj_create_(struct nouveau_object *, struct nouveau_object *,
43 struct nouveau_oclass *, u32 pclass,
44 struct nouveau_object *, u32 size, u32 align,
45 u32 flags, int length, void **);
46void nouveau_gpuobj_destroy(struct nouveau_gpuobj *);
47
48int nouveau_gpuobj_new(struct nouveau_object *, struct nouveau_object *,
49 u32 size, u32 align, u32 flags,
50 struct nouveau_gpuobj **);
51int nouveau_gpuobj_dup(struct nouveau_object *, struct nouveau_gpuobj *,
52 struct nouveau_gpuobj **);
53
54int nouveau_gpuobj_map(struct nouveau_gpuobj *, u32 acc, struct nouveau_vma *);
55int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *, struct nouveau_vm *,
56 u32 access, struct nouveau_vma *);
57void nouveau_gpuobj_unmap(struct nouveau_vma *);
58
59static inline void
60nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
61{
62 nouveau_object_ref(&obj->base, (struct nouveau_object **)ref);
63}
64
65void _nouveau_gpuobj_dtor(struct nouveau_object *);
66int _nouveau_gpuobj_init(struct nouveau_object *);
67int _nouveau_gpuobj_fini(struct nouveau_object *, bool);
68u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
69void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
70
71#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h
new file mode 100644
index 000000000000..363674cdf8ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h
@@ -0,0 +1,31 @@
1#ifndef __NOUVEAU_HANDLE_H__
2#define __NOUVEAU_HANDLE_H__
3
4struct nouveau_handle {
5 struct nouveau_namedb *namedb;
6 struct list_head node;
7
8 struct list_head head;
9 struct list_head tree;
10 u32 name;
11 u32 priv;
12
13 struct nouveau_handle *parent;
14 struct nouveau_object *object;
15};
16
17int nouveau_handle_create(struct nouveau_object *, u32 parent, u32 handle,
18 struct nouveau_object *, struct nouveau_handle **);
19void nouveau_handle_destroy(struct nouveau_handle *);
20int nouveau_handle_init(struct nouveau_handle *);
21int nouveau_handle_fini(struct nouveau_handle *, bool suspend);
22
23struct nouveau_object *
24nouveau_handle_ref(struct nouveau_object *, u32 name);
25
26struct nouveau_handle *nouveau_handle_get_class(struct nouveau_object *, u16);
27struct nouveau_handle *nouveau_handle_get_vinst(struct nouveau_object *, u64);
28struct nouveau_handle *nouveau_handle_get_cinst(struct nouveau_object *, u32);
29void nouveau_handle_put(struct nouveau_handle *);
30
31#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/math.h b/drivers/gpu/drm/nouveau/core/include/core/math.h
new file mode 100644
index 000000000000..f808131c5cd8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/math.h
@@ -0,0 +1,16 @@
1#ifndef __NOUVEAU_MATH_H__
2#define __NOUVEAU_MATH_H__
3
4static inline int
5log2i(u64 base)
6{
7 u64 temp = base >> 1;
8 int log2;
9
10 for (log2 = 0; temp; log2++, temp >>= 1) {
11 }
12
13 return (base & (base - 1)) ? log2 + 1: log2;
14}
15
16#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
new file mode 100644
index 000000000000..9ee9bf4028ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -0,0 +1,33 @@
1#ifndef __NOUVEAU_MM_H__
2#define __NOUVEAU_MM_H__
3
4struct nouveau_mm_node {
5 struct list_head nl_entry;
6 struct list_head fl_entry;
7 struct list_head rl_entry;
8
9 u8 type;
10 u32 offset;
11 u32 length;
12};
13
14struct nouveau_mm {
15 struct list_head nodes;
16 struct list_head free;
17
18 struct mutex mutex;
19
20 u32 block_size;
21 int heap_nodes;
22 u32 heap_size;
23};
24
25int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
26int nouveau_mm_fini(struct nouveau_mm *);
27int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
28 u32 align, struct nouveau_mm_node **);
29int nouveau_mm_tail(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
30 u32 align, struct nouveau_mm_node **);
31void nouveau_mm_free(struct nouveau_mm *, struct nouveau_mm_node **);
32
33#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/namedb.h b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
new file mode 100644
index 000000000000..8897e0886085
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
@@ -0,0 +1,56 @@
1#ifndef __NOUVEAU_NAMEDB_H__
2#define __NOUVEAU_NAMEDB_H__
3
4#include <core/parent.h>
5
6struct nouveau_handle;
7
8struct nouveau_namedb {
9 struct nouveau_parent base;
10 rwlock_t lock;
11 struct list_head list;
12};
13
14static inline struct nouveau_namedb *
15nv_namedb(void *obj)
16{
17#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
18 if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
19 nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
20#endif
21 return obj;
22}
23
24#define nouveau_namedb_create(p,e,c,v,s,m,d) \
25 nouveau_namedb_create_((p), (e), (c), (v), (s), (m), \
26 sizeof(**d), (void **)d)
27#define nouveau_namedb_init(p) \
28 nouveau_parent_init(&(p)->base)
29#define nouveau_namedb_fini(p,s) \
30 nouveau_parent_fini(&(p)->base, (s))
31#define nouveau_namedb_destroy(p) \
32 nouveau_parent_destroy(&(p)->base)
33
34int nouveau_namedb_create_(struct nouveau_object *, struct nouveau_object *,
35 struct nouveau_oclass *, u32 pclass,
36 struct nouveau_oclass *, u32 engcls,
37 int size, void **);
38
39int _nouveau_namedb_ctor(struct nouveau_object *, struct nouveau_object *,
40 struct nouveau_oclass *, void *, u32,
41 struct nouveau_object **);
42#define _nouveau_namedb_dtor _nouveau_parent_dtor
43#define _nouveau_namedb_init _nouveau_parent_init
44#define _nouveau_namedb_fini _nouveau_parent_fini
45
46int nouveau_namedb_insert(struct nouveau_namedb *, u32 name,
47 struct nouveau_object *, struct nouveau_handle *);
48void nouveau_namedb_remove(struct nouveau_handle *);
49
50struct nouveau_handle *nouveau_namedb_get(struct nouveau_namedb *, u32);
51struct nouveau_handle *nouveau_namedb_get_class(struct nouveau_namedb *, u16);
52struct nouveau_handle *nouveau_namedb_get_vinst(struct nouveau_namedb *, u64);
53struct nouveau_handle *nouveau_namedb_get_cinst(struct nouveau_namedb *, u32);
54void nouveau_namedb_put(struct nouveau_handle *);
55
56#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
new file mode 100644
index 000000000000..818feabbf4a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -0,0 +1,188 @@
1#ifndef __NOUVEAU_OBJECT_H__
2#define __NOUVEAU_OBJECT_H__
3
4#include <core/os.h>
5#include <core/printk.h>
6
7#define NV_PARENT_CLASS 0x80000000
8#define NV_NAMEDB_CLASS 0x40000000
9#define NV_CLIENT_CLASS 0x20000000
10#define NV_SUBDEV_CLASS 0x10000000
11#define NV_ENGINE_CLASS 0x08000000
12#define NV_MEMOBJ_CLASS 0x04000000
13#define NV_GPUOBJ_CLASS 0x02000000
14#define NV_ENGCTX_CLASS 0x01000000
15#define NV_OBJECT_CLASS 0x0000ffff
16
17struct nouveau_object {
18 struct nouveau_oclass *oclass;
19 struct nouveau_object *parent;
20 struct nouveau_object *engine;
21 atomic_t refcount;
22 atomic_t usecount;
23#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
24#define NOUVEAU_OBJECT_MAGIC 0x75ef0bad
25 struct list_head list;
26 u32 _magic;
27#endif
28};
29
30static inline struct nouveau_object *
31nv_object(void *obj)
32{
33#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
34 if (likely(obj)) {
35 struct nouveau_object *object = obj;
36 if (unlikely(object->_magic != NOUVEAU_OBJECT_MAGIC))
37 nv_assert("BAD CAST -> NvObject, invalid magic");
38 }
39#endif
40 return obj;
41}
42
43#define nouveau_object_create(p,e,c,s,d) \
44 nouveau_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
45int nouveau_object_create_(struct nouveau_object *, struct nouveau_object *,
46 struct nouveau_oclass *, u32, int size, void **);
47void nouveau_object_destroy(struct nouveau_object *);
48int nouveau_object_init(struct nouveau_object *);
49int nouveau_object_fini(struct nouveau_object *, bool suspend);
50
51extern struct nouveau_ofuncs nouveau_object_ofuncs;
52
53struct nouveau_oclass {
54 u32 handle;
55 struct nouveau_ofuncs *ofuncs;
56 struct nouveau_omthds *omthds;
57};
58
59#define nv_oclass(o) nv_object(o)->oclass
60#define nv_hclass(o) nv_oclass(o)->handle
61#define nv_iclass(o,i) (nv_hclass(o) & (i))
62#define nv_mclass(o) nv_iclass(o, NV_OBJECT_CLASS)
63
64static inline struct nouveau_object *
65nv_pclass(struct nouveau_object *parent, u32 oclass)
66{
67 while (parent && !nv_iclass(parent, oclass))
68 parent = parent->parent;
69 return parent;
70}
71
72struct nouveau_omthds {
73 u32 method;
74 int (*call)(struct nouveau_object *, u32, void *, u32);
75};
76
77struct nouveau_ofuncs {
78 int (*ctor)(struct nouveau_object *, struct nouveau_object *,
79 struct nouveau_oclass *, void *data, u32 size,
80 struct nouveau_object **);
81 void (*dtor)(struct nouveau_object *);
82 int (*init)(struct nouveau_object *);
83 int (*fini)(struct nouveau_object *, bool suspend);
84 u8 (*rd08)(struct nouveau_object *, u32 offset);
85 u16 (*rd16)(struct nouveau_object *, u32 offset);
86 u32 (*rd32)(struct nouveau_object *, u32 offset);
87 void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
88 void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
89 void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
90};
91
92static inline struct nouveau_ofuncs *
93nv_ofuncs(void *obj)
94{
95 return nv_oclass(obj)->ofuncs;
96}
97
98int nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
99 struct nouveau_oclass *, void *, u32,
100 struct nouveau_object **);
101void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **);
102int nouveau_object_inc(struct nouveau_object *);
103int nouveau_object_dec(struct nouveau_object *, bool suspend);
104
105int nouveau_object_new(struct nouveau_object *, u32 parent, u32 handle,
106 u16 oclass, void *data, u32 size,
107 struct nouveau_object **);
108int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
109void nouveau_object_debug(void);
110
111static inline int
112nv_call(void *obj, u32 mthd, u32 data)
113{
114 struct nouveau_omthds *method = nv_oclass(obj)->omthds;
115
116 while (method && method->call) {
117 if (method->method == mthd)
118 return method->call(obj, mthd, &data, sizeof(data));
119 method++;
120 }
121
122 return -EINVAL;
123}
124
125static inline u8
126nv_ro08(void *obj, u32 addr)
127{
128 u8 data = nv_ofuncs(obj)->rd08(obj, addr);
129 nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
130 return data;
131}
132
133static inline u16
134nv_ro16(void *obj, u32 addr)
135{
136 u16 data = nv_ofuncs(obj)->rd16(obj, addr);
137 nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
138 return data;
139}
140
141static inline u32
142nv_ro32(void *obj, u32 addr)
143{
144 u32 data = nv_ofuncs(obj)->rd32(obj, addr);
145 nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
146 return data;
147}
148
149static inline void
150nv_wo08(void *obj, u32 addr, u8 data)
151{
152 nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
153 nv_ofuncs(obj)->wr08(obj, addr, data);
154}
155
156static inline void
157nv_wo16(void *obj, u32 addr, u16 data)
158{
159 nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
160 nv_ofuncs(obj)->wr16(obj, addr, data);
161}
162
163static inline void
164nv_wo32(void *obj, u32 addr, u32 data)
165{
166 nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
167 nv_ofuncs(obj)->wr32(obj, addr, data);
168}
169
170static inline u32
171nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
172{
173 u32 temp = nv_ro32(obj, addr);
174 nv_wo32(obj, addr, (temp & ~mask) | data);
175 return temp;
176}
177
178static inline bool
179nv_strncmp(void *obj, u32 addr, u32 len, const char *str)
180{
181 while (len--) {
182 if (nv_ro08(obj, addr++) != *(str++))
183 return false;
184 }
185 return true;
186}
187
188#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/option.h b/drivers/gpu/drm/nouveau/core/include/core/option.h
new file mode 100644
index 000000000000..27074957fd21
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/option.h
@@ -0,0 +1,11 @@
1#ifndef __NOUVEAU_OPTION_H__
2#define __NOUVEAU_OPTION_H__
3
4#include <core/os.h>
5
6const char *nouveau_stropt(const char *optstr, const char *opt, int *len);
7bool nouveau_boolopt(const char *optstr, const char *opt, bool value);
8
9int nouveau_dbgopt(const char *optstr, const char *sub);
10
11#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
new file mode 100644
index 000000000000..d3aa251a5eb6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -0,0 +1,64 @@
1#ifndef __NOUVEAU_PARENT_H__
2#define __NOUVEAU_PARENT_H__
3
4#include <core/device.h>
5#include <core/object.h>
6
7struct nouveau_sclass {
8 struct nouveau_sclass *sclass;
9 struct nouveau_engine *engine;
10 struct nouveau_oclass *oclass;
11};
12
13struct nouveau_parent {
14 struct nouveau_object base;
15
16 struct nouveau_sclass *sclass;
17 u32 engine;
18
19 int (*context_attach)(struct nouveau_object *,
20 struct nouveau_object *);
21 int (*context_detach)(struct nouveau_object *, bool suspend,
22 struct nouveau_object *);
23
24 int (*object_attach)(struct nouveau_object *parent,
25 struct nouveau_object *object, u32 name);
26 void (*object_detach)(struct nouveau_object *parent, int cookie);
27};
28
29static inline struct nouveau_parent *
30nv_parent(void *obj)
31{
32#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
33 if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
34 nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
35#endif
36 return obj;
37}
38
39#define nouveau_parent_create(p,e,c,v,s,m,d) \
40 nouveau_parent_create_((p), (e), (c), (v), (s), (m), \
41 sizeof(**d), (void **)d)
42#define nouveau_parent_init(p) \
43 nouveau_object_init(&(p)->base)
44#define nouveau_parent_fini(p,s) \
45 nouveau_object_fini(&(p)->base, (s))
46
47int nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *,
48 struct nouveau_oclass *, u32 pclass,
49 struct nouveau_oclass *, u64 engcls,
50 int size, void **);
51void nouveau_parent_destroy(struct nouveau_parent *);
52
53int _nouveau_parent_ctor(struct nouveau_object *, struct nouveau_object *,
54 struct nouveau_oclass *, void *, u32,
55 struct nouveau_object **);
56void _nouveau_parent_dtor(struct nouveau_object *);
57#define _nouveau_parent_init _nouveau_object_init
58#define _nouveau_parent_fini _nouveau_object_fini
59
60int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
61 struct nouveau_object **pengine,
62 struct nouveau_oclass **poclass);
63
64#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
new file mode 100644
index 000000000000..1d629664f32d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -0,0 +1,39 @@
1#ifndef __NOUVEAU_PRINTK_H__
2#define __NOUVEAU_PRINTK_H__
3
4#include <core/os.h>
5#include <core/debug.h>
6
7struct nouveau_object;
8
9#define NV_PRINTK_FATAL KERN_CRIT
10#define NV_PRINTK_ERROR KERN_ERR
11#define NV_PRINTK_WARN KERN_WARNING
12#define NV_PRINTK_INFO KERN_INFO
13#define NV_PRINTK_DEBUG KERN_DEBUG
14#define NV_PRINTK_PARANOIA KERN_DEBUG
15#define NV_PRINTK_TRACE KERN_DEBUG
16#define NV_PRINTK_SPAM KERN_DEBUG
17
18void nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
19
20#define nv_printk(o,l,f,a...) do { \
21 if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
22 nv_printk_(nv_object(o), NV_PRINTK_##l, NV_DBG_##l, f, ##a); \
23} while(0)
24
25#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
26#define nv_error(o,f,a...) nv_printk((o), ERROR, f, ##a)
27#define nv_warn(o,f,a...) nv_printk((o), WARN, f, ##a)
28#define nv_info(o,f,a...) nv_printk((o), INFO, f, ##a)
29#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
30#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
31#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
32
33#define nv_assert(f,a...) do { \
34 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
35 nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \
36 BUG_ON(1); \
37} while(0)
38
39#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ramht.h b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
new file mode 100644
index 000000000000..47e4cacbca37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
@@ -0,0 +1,23 @@
1#ifndef __NOUVEAU_RAMHT_H__
2#define __NOUVEAU_RAMHT_H__
3
4#include <core/gpuobj.h>
5
6struct nouveau_ramht {
7 struct nouveau_gpuobj base;
8 int bits;
9};
10
11int nouveau_ramht_insert(struct nouveau_ramht *, int chid,
12 u32 handle, u32 context);
13void nouveau_ramht_remove(struct nouveau_ramht *, int cookie);
14int nouveau_ramht_new(struct nouveau_object *, struct nouveau_object *,
15 u32 size, u32 align, struct nouveau_ramht **);
16
17static inline void
18nouveau_ramht_ref(struct nouveau_ramht *obj, struct nouveau_ramht **ref)
19{
20 nouveau_gpuobj_ref(&obj->base, (struct nouveau_gpuobj **)ref);
21}
22
23#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/subdev.h b/drivers/gpu/drm/nouveau/core/include/core/subdev.h
new file mode 100644
index 000000000000..e9632e931616
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/subdev.h
@@ -0,0 +1,118 @@
1#ifndef __NOUVEAU_SUBDEV_H__
2#define __NOUVEAU_SUBDEV_H__
3
4#include <core/object.h>
5
6#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub))
7#define NV_SUBDEV(name,var) NV_SUBDEV_(NVDEV_SUBDEV_##name, (var))
8
9struct nouveau_subdev {
10 struct nouveau_object base;
11 struct mutex mutex;
12 const char *name;
13 void __iomem *mmio;
14 u32 debug;
15 u32 unit;
16
17 void (*intr)(struct nouveau_subdev *);
18};
19
20static inline struct nouveau_subdev *
21nv_subdev(void *obj)
22{
23#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
24 if (unlikely(!nv_iclass(obj, NV_SUBDEV_CLASS)))
25 nv_assert("BAD CAST -> NvSubDev, %08x", nv_hclass(obj));
26#endif
27 return obj;
28}
29
30static inline int
31nv_subidx(struct nouveau_object *object)
32{
33 return nv_hclass(nv_subdev(object)) & 0xff;
34}
35
36#define nouveau_subdev_create(p,e,o,v,s,f,d) \
37 nouveau_subdev_create_((p), (e), (o), (v), (s), (f), \
38 sizeof(**d),(void **)d)
39
40int nouveau_subdev_create_(struct nouveau_object *, struct nouveau_object *,
41 struct nouveau_oclass *, u32 pclass,
42 const char *sname, const char *fname,
43 int size, void **);
44void nouveau_subdev_destroy(struct nouveau_subdev *);
45int nouveau_subdev_init(struct nouveau_subdev *);
46int nouveau_subdev_fini(struct nouveau_subdev *, bool suspend);
47void nouveau_subdev_reset(struct nouveau_object *);
48
49void _nouveau_subdev_dtor(struct nouveau_object *);
50int _nouveau_subdev_init(struct nouveau_object *);
51int _nouveau_subdev_fini(struct nouveau_object *, bool suspend);
52
53#define s_printk(s,l,f,a...) do { \
54 if ((s)->debug >= OS_DBG_##l) { \
55 nv_printk((s)->base.parent, (s)->name, l, f, ##a); \
56 } \
57} while(0)
58
59static inline u8
60nv_rd08(void *obj, u32 addr)
61{
62 struct nouveau_subdev *subdev = nv_subdev(obj);
63 u8 data = ioread8(subdev->mmio + addr);
64 nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data);
65 return data;
66}
67
68static inline u16
69nv_rd16(void *obj, u32 addr)
70{
71 struct nouveau_subdev *subdev = nv_subdev(obj);
72 u16 data = ioread16_native(subdev->mmio + addr);
73 nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data);
74 return data;
75}
76
77static inline u32
78nv_rd32(void *obj, u32 addr)
79{
80 struct nouveau_subdev *subdev = nv_subdev(obj);
81 u32 data = ioread32_native(subdev->mmio + addr);
82 nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data);
83 return data;
84}
85
86static inline void
87nv_wr08(void *obj, u32 addr, u8 data)
88{
89 struct nouveau_subdev *subdev = nv_subdev(obj);
90 nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data);
91 iowrite8(data, subdev->mmio + addr);
92}
93
94static inline void
95nv_wr16(void *obj, u32 addr, u16 data)
96{
97 struct nouveau_subdev *subdev = nv_subdev(obj);
98 nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data);
99 iowrite16_native(data, subdev->mmio + addr);
100}
101
102static inline void
103nv_wr32(void *obj, u32 addr, u32 data)
104{
105 struct nouveau_subdev *subdev = nv_subdev(obj);
106 nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data);
107 iowrite32_native(data, subdev->mmio + addr);
108}
109
110static inline u32
111nv_mask(void *obj, u32 addr, u32 mask, u32 data)
112{
113 u32 temp = nv_rd32(obj, addr);
114 nv_wr32(obj, addr, (temp & ~mask) | data);
115 return temp;
116}
117
118#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
new file mode 100644
index 000000000000..75d1ed5f85fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -0,0 +1,45 @@
1#ifndef __NOUVEAU_BSP_H__
2#define __NOUVEAU_BSP_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_bsp_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_bsp_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_bsp_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_bsp_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
21#define _nouveau_bsp_context_init _nouveau_engctx_init
22#define _nouveau_bsp_context_fini _nouveau_engctx_fini
23#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_bsp {
27 struct nouveau_engine base;
28};
29
30#define nouveau_bsp_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
32#define nouveau_bsp_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_bsp_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_bsp_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_bsp_dtor _nouveau_engine_dtor
40#define _nouveau_bsp_init _nouveau_engine_init
41#define _nouveau_bsp_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_bsp_oclass;
44
45#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
new file mode 100644
index 000000000000..70b9d8c5fcf5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -0,0 +1,49 @@
1#ifndef __NOUVEAU_COPY_H__
2#define __NOUVEAU_COPY_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_copy_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_copy_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_copy_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_copy_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
21#define _nouveau_copy_context_init _nouveau_engctx_init
22#define _nouveau_copy_context_fini _nouveau_engctx_fini
23#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_copy {
27 struct nouveau_engine base;
28};
29
30#define nouveau_copy_create(p,e,c,y,i,d) \
31 nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
32#define nouveau_copy_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_copy_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_copy_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_copy_dtor _nouveau_engine_dtor
40#define _nouveau_copy_init _nouveau_engine_init
41#define _nouveau_copy_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nva3_copy_oclass;
44extern struct nouveau_oclass nvc0_copy0_oclass;
45extern struct nouveau_oclass nvc0_copy1_oclass;
46extern struct nouveau_oclass nve0_copy0_oclass;
47extern struct nouveau_oclass nve0_copy1_oclass;
48
49#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
new file mode 100644
index 000000000000..e3674743baaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -0,0 +1,46 @@
1#ifndef __NOUVEAU_CRYPT_H__
2#define __NOUVEAU_CRYPT_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_crypt_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_crypt_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_crypt_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_crypt_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
21#define _nouveau_crypt_context_init _nouveau_engctx_init
22#define _nouveau_crypt_context_fini _nouveau_engctx_fini
23#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_crypt {
27 struct nouveau_engine base;
28};
29
30#define nouveau_crypt_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
32#define nouveau_crypt_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_crypt_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_crypt_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_crypt_dtor _nouveau_engine_dtor
40#define _nouveau_crypt_init _nouveau_engine_init
41#define _nouveau_crypt_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_crypt_oclass;
44extern struct nouveau_oclass nv98_crypt_oclass;
45
46#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
new file mode 100644
index 000000000000..38ec1252cbaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -0,0 +1,44 @@
1#ifndef __NOUVEAU_DISP_H__
2#define __NOUVEAU_DISP_H__
3
4#include <core/object.h>
5#include <core/engine.h>
6#include <core/device.h>
7
8struct nouveau_disp {
9 struct nouveau_engine base;
10
11 struct {
12 struct list_head list;
13 spinlock_t lock;
14 void (*notify)(void *, int);
15 void (*get)(void *, int);
16 void (*put)(void *, int);
17 void *data;
18 } vblank;
19};
20
21static inline struct nouveau_disp *
22nouveau_disp(void *obj)
23{
24 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
25}
26
27#define nouveau_disp_create(p,e,c,i,x,d) \
28 nouveau_engine_create((p), (e), (c), true, (i), (x), (d))
29#define nouveau_disp_destroy(d) \
30 nouveau_engine_destroy(&(d)->base)
31#define nouveau_disp_init(d) \
32 nouveau_engine_init(&(d)->base)
33#define nouveau_disp_fini(d,s) \
34 nouveau_engine_fini(&(d)->base, (s))
35
36#define _nouveau_disp_dtor _nouveau_engine_dtor
37#define _nouveau_disp_init _nouveau_engine_init
38#define _nouveau_disp_fini _nouveau_engine_fini
39
40extern struct nouveau_oclass nv04_disp_oclass;
41extern struct nouveau_oclass nv50_disp_oclass;
42extern struct nouveau_oclass nvd0_disp_oclass;
43
44#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
new file mode 100644
index 000000000000..700ccbb1941f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -0,0 +1,57 @@
1#ifndef __NOUVEAU_DMAOBJ_H__
2#define __NOUVEAU_DMAOBJ_H__
3
4#include <core/object.h>
5#include <core/engine.h>
6
7struct nouveau_gpuobj;
8
9struct nouveau_dmaobj {
10 struct nouveau_object base;
11 u32 target;
12 u32 access;
13 u64 start;
14 u64 limit;
15};
16
17#define nouveau_dmaobj_create(p,e,c,a,s,d) \
18 nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
19#define nouveau_dmaobj_destroy(p) \
20 nouveau_object_destroy(&(p)->base)
21#define nouveau_dmaobj_init(p) \
22 nouveau_object_init(&(p)->base)
23#define nouveau_dmaobj_fini(p,s) \
24 nouveau_object_fini(&(p)->base, (s))
25
26int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
27 struct nouveau_oclass *, void *data, u32 size,
28 int length, void **);
29
30#define _nouveau_dmaobj_dtor nouveau_object_destroy
31#define _nouveau_dmaobj_init nouveau_object_init
32#define _nouveau_dmaobj_fini nouveau_object_fini
33
34struct nouveau_dmaeng {
35 struct nouveau_engine base;
36 int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
37 struct nouveau_dmaobj *, struct nouveau_gpuobj **);
38};
39
40#define nouveau_dmaeng_create(p,e,c,d) \
41 nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d))
42#define nouveau_dmaeng_destroy(p) \
43 nouveau_engine_destroy(&(p)->base)
44#define nouveau_dmaeng_init(p) \
45 nouveau_engine_init(&(p)->base)
46#define nouveau_dmaeng_fini(p,s) \
47 nouveau_engine_fini(&(p)->base, (s))
48
49#define _nouveau_dmaeng_dtor _nouveau_engine_dtor
50#define _nouveau_dmaeng_init _nouveau_engine_init
51#define _nouveau_dmaeng_fini _nouveau_engine_fini
52
53extern struct nouveau_oclass nv04_dmaeng_oclass;
54extern struct nouveau_oclass nv50_dmaeng_oclass;
55extern struct nouveau_oclass nvc0_dmaeng_oclass;
56
57#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
new file mode 100644
index 000000000000..d67fed1e3970
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -0,0 +1,111 @@
1#ifndef __NOUVEAU_FIFO_H__
2#define __NOUVEAU_FIFO_H__
3
4#include <core/namedb.h>
5#include <core/gpuobj.h>
6#include <core/engine.h>
7
8struct nouveau_fifo_chan {
9 struct nouveau_namedb base;
10 struct nouveau_dmaobj *pushdma;
11 struct nouveau_gpuobj *pushgpu;
12 void __iomem *user;
13 u32 size;
14 u16 chid;
15 atomic_t refcnt; /* NV04_NVSW_SET_REF */
16};
17
18static inline struct nouveau_fifo_chan *
19nouveau_fifo_chan(void *obj)
20{
21 return (void *)nv_namedb(obj);
22}
23
24#define nouveau_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
25 nouveau_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
26 (m), sizeof(**d), (void **)d)
27#define nouveau_fifo_channel_init(p) \
28 nouveau_namedb_init(&(p)->base)
29#define nouveau_fifo_channel_fini(p,s) \
30 nouveau_namedb_fini(&(p)->base, (s))
31
32int nouveau_fifo_channel_create_(struct nouveau_object *,
33 struct nouveau_object *,
34 struct nouveau_oclass *,
35 int bar, u32 addr, u32 size, u32 push,
36 u32 engmask, int len, void **);
37void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
38
39#define _nouveau_fifo_channel_init _nouveau_namedb_init
40#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
41
42void _nouveau_fifo_channel_dtor(struct nouveau_object *);
43u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
44void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
45
46struct nouveau_fifo_base {
47 struct nouveau_gpuobj base;
48};
49
50#define nouveau_fifo_context_create(p,e,c,g,s,a,f,d) \
51 nouveau_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
52#define nouveau_fifo_context_destroy(p) \
53 nouveau_gpuobj_destroy(&(p)->base)
54#define nouveau_fifo_context_init(p) \
55 nouveau_gpuobj_init(&(p)->base)
56#define nouveau_fifo_context_fini(p,s) \
57 nouveau_gpuobj_fini(&(p)->base, (s))
58
59#define _nouveau_fifo_context_dtor _nouveau_gpuobj_dtor
60#define _nouveau_fifo_context_init _nouveau_gpuobj_init
61#define _nouveau_fifo_context_fini _nouveau_gpuobj_fini
62#define _nouveau_fifo_context_rd32 _nouveau_gpuobj_rd32
63#define _nouveau_fifo_context_wr32 _nouveau_gpuobj_wr32
64
65struct nouveau_fifo {
66 struct nouveau_engine base;
67
68 struct nouveau_object **channel;
69 spinlock_t lock;
70 u16 min;
71 u16 max;
72
73 int (*chid)(struct nouveau_fifo *, struct nouveau_object *);
74 void (*pause)(struct nouveau_fifo *, unsigned long *);
75 void (*start)(struct nouveau_fifo *, unsigned long *);
76};
77
78static inline struct nouveau_fifo *
79nouveau_fifo(void *obj)
80{
81 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_FIFO];
82}
83
84#define nouveau_fifo_create(o,e,c,fc,lc,d) \
85 nouveau_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
86#define nouveau_fifo_init(p) \
87 nouveau_engine_init(&(p)->base)
88#define nouveau_fifo_fini(p,s) \
89 nouveau_engine_fini(&(p)->base, (s))
90
91int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
92 struct nouveau_oclass *, int min, int max,
93 int size, void **);
94void nouveau_fifo_destroy(struct nouveau_fifo *);
95
96#define _nouveau_fifo_init _nouveau_engine_init
97#define _nouveau_fifo_fini _nouveau_engine_fini
98
99extern struct nouveau_oclass nv04_fifo_oclass;
100extern struct nouveau_oclass nv10_fifo_oclass;
101extern struct nouveau_oclass nv17_fifo_oclass;
102extern struct nouveau_oclass nv40_fifo_oclass;
103extern struct nouveau_oclass nv50_fifo_oclass;
104extern struct nouveau_oclass nv84_fifo_oclass;
105extern struct nouveau_oclass nvc0_fifo_oclass;
106extern struct nouveau_oclass nve0_fifo_oclass;
107
108void nv04_fifo_intr(struct nouveau_subdev *);
109int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
110
111#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
new file mode 100644
index 000000000000..6943b40d0817
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -0,0 +1,72 @@
1#ifndef __NOUVEAU_GRAPH_H__
2#define __NOUVEAU_GRAPH_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6#include <core/enum.h>
7
8struct nouveau_graph_chan {
9 struct nouveau_engctx base;
10};
11
12#define nouveau_graph_context_create(p,e,c,g,s,a,f,d) \
13 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
14#define nouveau_graph_context_destroy(d) \
15 nouveau_engctx_destroy(&(d)->base)
16#define nouveau_graph_context_init(d) \
17 nouveau_engctx_init(&(d)->base)
18#define nouveau_graph_context_fini(d,s) \
19 nouveau_engctx_fini(&(d)->base, (s))
20
21#define _nouveau_graph_context_dtor _nouveau_engctx_dtor
22#define _nouveau_graph_context_init _nouveau_engctx_init
23#define _nouveau_graph_context_fini _nouveau_engctx_fini
24#define _nouveau_graph_context_rd32 _nouveau_engctx_rd32
25#define _nouveau_graph_context_wr32 _nouveau_engctx_wr32
26
27struct nouveau_graph {
28 struct nouveau_engine base;
29};
30
31static inline struct nouveau_graph *
32nouveau_graph(void *obj)
33{
34 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_GR];
35}
36
37#define nouveau_graph_create(p,e,c,y,d) \
38 nouveau_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
39#define nouveau_graph_destroy(d) \
40 nouveau_engine_destroy(&(d)->base)
41#define nouveau_graph_init(d) \
42 nouveau_engine_init(&(d)->base)
43#define nouveau_graph_fini(d,s) \
44 nouveau_engine_fini(&(d)->base, (s))
45
46#define _nouveau_graph_dtor _nouveau_engine_dtor
47#define _nouveau_graph_init _nouveau_engine_init
48#define _nouveau_graph_fini _nouveau_engine_fini
49
50extern struct nouveau_oclass nv04_graph_oclass;
51extern struct nouveau_oclass nv10_graph_oclass;
52extern struct nouveau_oclass nv20_graph_oclass;
53extern struct nouveau_oclass nv25_graph_oclass;
54extern struct nouveau_oclass nv2a_graph_oclass;
55extern struct nouveau_oclass nv30_graph_oclass;
56extern struct nouveau_oclass nv34_graph_oclass;
57extern struct nouveau_oclass nv35_graph_oclass;
58extern struct nouveau_oclass nv40_graph_oclass;
59extern struct nouveau_oclass nv50_graph_oclass;
60extern struct nouveau_oclass nvc0_graph_oclass;
61extern struct nouveau_oclass nve0_graph_oclass;
62
63extern const struct nouveau_bitfield nv04_graph_nsource[];
64extern struct nouveau_ofuncs nv04_graph_ofuncs;
65bool nv04_graph_idle(void *obj);
66
67extern const struct nouveau_bitfield nv10_graph_intr_name[];
68extern const struct nouveau_bitfield nv10_graph_nstatus[];
69
70extern const struct nouveau_enum nv50_data_error_names[];
71
72#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
new file mode 100644
index 000000000000..bbf0d4a5bbd7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -0,0 +1,61 @@
1#ifndef __NOUVEAU_MPEG_H__
2#define __NOUVEAU_MPEG_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_mpeg_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_mpeg_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_mpeg_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_mpeg_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_mpeg_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_mpeg_context_dtor _nouveau_engctx_dtor
21#define _nouveau_mpeg_context_init _nouveau_engctx_init
22#define _nouveau_mpeg_context_fini _nouveau_engctx_fini
23#define _nouveau_mpeg_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_mpeg_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_mpeg {
27 struct nouveau_engine base;
28};
29
30#define nouveau_mpeg_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
32#define nouveau_mpeg_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_mpeg_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_mpeg_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_mpeg_dtor _nouveau_engine_dtor
40#define _nouveau_mpeg_init _nouveau_engine_init
41#define _nouveau_mpeg_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv31_mpeg_oclass;
44extern struct nouveau_oclass nv40_mpeg_oclass;
45extern struct nouveau_oclass nv50_mpeg_oclass;
46extern struct nouveau_oclass nv84_mpeg_oclass;
47
48extern struct nouveau_oclass nv31_mpeg_sclass[];
49void nv31_mpeg_intr(struct nouveau_subdev *);
50void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
51int nv31_mpeg_init(struct nouveau_object *);
52
53extern struct nouveau_ofuncs nv50_mpeg_ofuncs;
54int nv50_mpeg_context_ctor(struct nouveau_object *, struct nouveau_object *,
55 struct nouveau_oclass *, void *, u32,
56 struct nouveau_object **);
57int nv50_mpeg_tlb_flush(struct nouveau_engine *);
58void nv50_mpeg_intr(struct nouveau_subdev *);
59int nv50_mpeg_init(struct nouveau_object *);
60
61#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
new file mode 100644
index 000000000000..74d554fb3281
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -0,0 +1,45 @@
1#ifndef __NOUVEAU_PPP_H__
2#define __NOUVEAU_PPP_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_ppp_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_ppp_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_ppp_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_ppp_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
21#define _nouveau_ppp_context_init _nouveau_engctx_init
22#define _nouveau_ppp_context_fini _nouveau_engctx_fini
23#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_ppp {
27 struct nouveau_engine base;
28};
29
30#define nouveau_ppp_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
32#define nouveau_ppp_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_ppp_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_ppp_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_ppp_dtor _nouveau_engine_dtor
40#define _nouveau_ppp_init _nouveau_engine_init
41#define _nouveau_ppp_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv98_ppp_oclass;
44
45#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
new file mode 100644
index 000000000000..c945691c8564
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -0,0 +1,60 @@
1#ifndef __NOUVEAU_SOFTWARE_H__
2#define __NOUVEAU_SOFTWARE_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_software_chan {
8 struct nouveau_engctx base;
9
10 struct {
11 struct list_head head;
12 u32 channel;
13 u32 ctxdma;
14 u64 offset;
15 u32 value;
16 u32 crtc;
17 } vblank;
18
19 int (*flip)(void *);
20 void *flip_data;
21};
22
23#define nouveau_software_context_create(p,e,c,d) \
24 nouveau_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
25#define nouveau_software_context_destroy(d) \
26 nouveau_engctx_destroy(&(d)->base)
27#define nouveau_software_context_init(d) \
28 nouveau_engctx_init(&(d)->base)
29#define nouveau_software_context_fini(d,s) \
30 nouveau_engctx_fini(&(d)->base, (s))
31
32#define _nouveau_software_context_dtor _nouveau_engctx_dtor
33#define _nouveau_software_context_init _nouveau_engctx_init
34#define _nouveau_software_context_fini _nouveau_engctx_fini
35
36struct nouveau_software {
37 struct nouveau_engine base;
38};
39
40#define nouveau_software_create(p,e,c,d) \
41 nouveau_engine_create((p), (e), (c), true, "SW", "software", (d))
42#define nouveau_software_destroy(d) \
43 nouveau_engine_destroy(&(d)->base)
44#define nouveau_software_init(d) \
45 nouveau_engine_init(&(d)->base)
46#define nouveau_software_fini(d,s) \
47 nouveau_engine_fini(&(d)->base, (s))
48
49#define _nouveau_software_dtor _nouveau_engine_dtor
50#define _nouveau_software_init _nouveau_engine_init
51#define _nouveau_software_fini _nouveau_engine_fini
52
53extern struct nouveau_oclass nv04_software_oclass;
54extern struct nouveau_oclass nv10_software_oclass;
55extern struct nouveau_oclass nv50_software_oclass;
56extern struct nouveau_oclass nvc0_software_oclass;
57
58void nv04_software_intr(struct nouveau_subdev *);
59
60#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
new file mode 100644
index 000000000000..05cd08fba377
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -0,0 +1,45 @@
1#ifndef __NOUVEAU_VP_H__
2#define __NOUVEAU_VP_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_vp_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_vp_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_vp_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_vp_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
21#define _nouveau_vp_context_init _nouveau_engctx_init
22#define _nouveau_vp_context_fini _nouveau_engctx_fini
23#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_vp {
27 struct nouveau_engine base;
28};
29
30#define nouveau_vp_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
32#define nouveau_vp_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_vp_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_vp_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_vp_dtor _nouveau_engine_dtor
40#define _nouveau_vp_init _nouveau_engine_init
41#define _nouveau_vp_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_vp_oclass;
44
45#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
new file mode 100644
index 000000000000..4f4ff4502c3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -0,0 +1,55 @@
1#ifndef __NOUVEAU_BAR_H__
2#define __NOUVEAU_BAR_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7#include <subdev/fb.h>
8
9struct nouveau_vma;
10
11struct nouveau_bar {
12 struct nouveau_subdev base;
13
14 int (*alloc)(struct nouveau_bar *, struct nouveau_object *,
15 struct nouveau_mem *, struct nouveau_object **);
16 void __iomem *iomem;
17
18 int (*kmap)(struct nouveau_bar *, struct nouveau_mem *,
19 u32 flags, struct nouveau_vma *);
20 int (*umap)(struct nouveau_bar *, struct nouveau_mem *,
21 u32 flags, struct nouveau_vma *);
22 void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
23 void (*flush)(struct nouveau_bar *);
24};
25
26static inline struct nouveau_bar *
27nouveau_bar(void *obj)
28{
29 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
30}
31
32#define nouveau_bar_create(p,e,o,d) \
33 nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
34#define nouveau_bar_init(p) \
35 nouveau_subdev_init(&(p)->base)
36#define nouveau_bar_fini(p,s) \
37 nouveau_subdev_fini(&(p)->base, (s))
38
39int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
40 struct nouveau_oclass *, int, void **);
41void nouveau_bar_destroy(struct nouveau_bar *);
42
43void _nouveau_bar_dtor(struct nouveau_object *);
44#define _nouveau_bar_init _nouveau_subdev_init
45#define _nouveau_bar_fini _nouveau_subdev_fini
46
47extern struct nouveau_oclass nv50_bar_oclass;
48extern struct nouveau_oclass nvc0_bar_oclass;
49
50int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
51 struct nouveau_mem *, struct nouveau_object **);
52
53void nv84_bar_flush(struct nouveau_bar *);
54
55#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
new file mode 100644
index 000000000000..d145b25e6be4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
@@ -0,0 +1,34 @@
1#ifndef __NOUVEAU_BIOS_H__
2#define __NOUVEAU_BIOS_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_bios {
8 struct nouveau_subdev base;
9 u32 size;
10 u8 *data;
11
12 u32 bmp_offset;
13 u32 bit_offset;
14
15 struct {
16 u8 major;
17 u8 chip;
18 u8 minor;
19 u8 micro;
20 } version;
21};
22
23static inline struct nouveau_bios *
24nouveau_bios(void *obj)
25{
26 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VBIOS];
27}
28
29u8 nvbios_checksum(const u8 *data, int size);
30u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
31
32extern struct nouveau_oclass nouveau_bios_oclass;
33
34#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h
new file mode 100644
index 000000000000..73f060b07981
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h
@@ -0,0 +1,13 @@
1#ifndef __NVBIOS_BIT_H__
2#define __NVBIOS_BIT_H__
3
4struct bit_entry {
5 u8 id;
6 u8 version;
7 u16 length;
8 u16 offset;
9};
10
11int bit_entry(struct nouveau_bios *, u8 id, struct bit_entry *);
12
13#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h
new file mode 100644
index 000000000000..10e4dbca649a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h
@@ -0,0 +1,39 @@
1#ifndef __NVBIOS_BMP_H__
2#define __NVBIOS_BMP_H__
3
4static inline u16
5bmp_version(struct nouveau_bios *bios)
6{
7 if (bios->bmp_offset) {
8 return nv_ro08(bios, bios->bmp_offset + 5) << 8 |
9 nv_ro08(bios, bios->bmp_offset + 6);
10 }
11
12 return 0x0000;
13}
14
15static inline u16
16bmp_mem_init_table(struct nouveau_bios *bios)
17{
18 if (bmp_version(bios) >= 0x0300)
19 return nv_ro16(bios, bios->bmp_offset + 24);
20 return 0x0000;
21}
22
23static inline u16
24bmp_sdr_seq_table(struct nouveau_bios *bios)
25{
26 if (bmp_version(bios) >= 0x0300)
27 return nv_ro16(bios, bios->bmp_offset + 26);
28 return 0x0000;
29}
30
31static inline u16
32bmp_ddr_seq_table(struct nouveau_bios *bios)
33{
34 if (bmp_version(bios) >= 0x0300)
35 return nv_ro16(bios, bios->bmp_offset + 28);
36 return 0x0000;
37}
38
39#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
new file mode 100644
index 000000000000..c1270548fd0d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
@@ -0,0 +1,27 @@
1#ifndef __NVBIOS_CONN_H__
2#define __NVBIOS_CONN_H__
3
4enum dcb_connector_type {
5 DCB_CONNECTOR_VGA = 0x00,
6 DCB_CONNECTOR_TV_0 = 0x10,
7 DCB_CONNECTOR_TV_1 = 0x11,
8 DCB_CONNECTOR_TV_3 = 0x13,
9 DCB_CONNECTOR_DVI_I = 0x30,
10 DCB_CONNECTOR_DVI_D = 0x31,
11 DCB_CONNECTOR_DMS59_0 = 0x38,
12 DCB_CONNECTOR_DMS59_1 = 0x39,
13 DCB_CONNECTOR_LVDS = 0x40,
14 DCB_CONNECTOR_LVDS_SPWG = 0x41,
15 DCB_CONNECTOR_DP = 0x46,
16 DCB_CONNECTOR_eDP = 0x47,
17 DCB_CONNECTOR_HDMI_0 = 0x60,
18 DCB_CONNECTOR_HDMI_1 = 0x61,
19 DCB_CONNECTOR_DMS59_DP0 = 0x64,
20 DCB_CONNECTOR_DMS59_DP1 = 0x65,
21 DCB_CONNECTOR_NONE = 0xff
22};
23
24u16 dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
25u16 dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len);
26
27#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
new file mode 100644
index 000000000000..d682fb625833
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -0,0 +1,90 @@
1#ifndef __NVBIOS_DCB_H__
2#define __NVBIOS_DCB_H__
3
4struct nouveau_bios;
5
6enum dcb_output_type {
7 DCB_OUTPUT_ANALOG = 0x0,
8 DCB_OUTPUT_TV = 0x1,
9 DCB_OUTPUT_TMDS = 0x2,
10 DCB_OUTPUT_LVDS = 0x3,
11 DCB_OUTPUT_DP = 0x6,
12 DCB_OUTPUT_EOL = 0xe,
13 DCB_OUTPUT_UNUSED = 0xf,
14 DCB_OUTPUT_ANY = -1,
15};
16
17struct dcb_output {
18 int index; /* may not be raw dcb index if merging has happened */
19 enum dcb_output_type type;
20 uint8_t i2c_index;
21 uint8_t heads;
22 uint8_t connector;
23 uint8_t bus;
24 uint8_t location;
25 uint8_t or;
26 bool duallink_possible;
27 union {
28 struct sor_conf {
29 int link;
30 } sorconf;
31 struct {
32 int maxfreq;
33 } crtconf;
34 struct {
35 struct sor_conf sor;
36 bool use_straps_for_mode;
37 bool use_acpi_for_edid;
38 bool use_power_scripts;
39 } lvdsconf;
40 struct {
41 bool has_component_output;
42 } tvconf;
43 struct {
44 struct sor_conf sor;
45 int link_nr;
46 int link_bw;
47 } dpconf;
48 struct {
49 struct sor_conf sor;
50 int slave_addr;
51 } tmdsconf;
52 };
53 bool i2c_upper_default;
54};
55
56u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
57u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
58int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
59 (struct nouveau_bios *, void *, int index, u16 entry));
60
61
62/* BIT 'U'/'d' table encoder subtables have hashes matching them to
63 * a particular set of encoders.
64 *
65 * This function returns true if a particular DCB entry matches.
66 */
67static inline bool
68dcb_hash_match(struct dcb_output *dcb, u32 hash)
69{
70 if ((hash & 0x000000f0) != (dcb->location << 4))
71 return false;
72 if ((hash & 0x0000000f) != dcb->type)
73 return false;
74 if (!(hash & (dcb->or << 16)))
75 return false;
76
77 switch (dcb->type) {
78 case DCB_OUTPUT_TMDS:
79 case DCB_OUTPUT_LVDS:
80 case DCB_OUTPUT_DP:
81 if (hash & 0x00c00000) {
82 if (!(hash & (dcb->sorconf.link << 22)))
83 return false;
84 }
85 default:
86 return true;
87 }
88}
89
90#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
new file mode 100644
index 000000000000..73b5e5d3e75a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -0,0 +1,8 @@
1#ifndef __NVBIOS_DP_H__
2#define __NVBIOS_DP_H__
3
4u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
5u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
6u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
7
8#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h
new file mode 100644
index 000000000000..949fee3af8fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h
@@ -0,0 +1,30 @@
1#ifndef __NVBIOS_EXTDEV_H__
2#define __NVBIOS_EXTDEV_H__
3
4struct nouveau_bios;
5
6enum nvbios_extdev_type {
7 NVBIOS_EXTDEV_LM89 = 0x02,
8 NVBIOS_EXTDEV_VT1103M = 0x40,
9 NVBIOS_EXTDEV_PX3540 = 0x41,
10 NVBIOS_EXTDEV_VT1105M = 0x42, /* or close enough... */
11 NVBIOS_EXTDEV_ADT7473 = 0x70, /* can also be a LM64 */
12 NVBIOS_EXTDEV_HDCP_EEPROM = 0x90,
13 NVBIOS_EXTDEV_NONE = 0xff,
14};
15
16struct nvbios_extdev_func {
17 u8 type;
18 u8 addr;
19 u8 bus;
20};
21
22int
23nvbios_extdev_parse(struct nouveau_bios *, int, struct nvbios_extdev_func *);
24
25int
26nvbios_extdev_find(struct nouveau_bios *, enum nvbios_extdev_type,
27 struct nvbios_extdev_func *);
28
29
30#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
new file mode 100644
index 000000000000..2bf178082a36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -0,0 +1,33 @@
1#ifndef __NVBIOS_GPIO_H__
2#define __NVBIOS_GPIO_H__
3
4struct nouveau_bios;
5
6enum dcb_gpio_func_name {
7 DCB_GPIO_PANEL_POWER = 0x01,
8 DCB_GPIO_TVDAC0 = 0x0c,
9 DCB_GPIO_TVDAC1 = 0x2d,
10 DCB_GPIO_PWM_FAN = 0x09,
11 DCB_GPIO_FAN_SENSE = 0x3d,
12 DCB_GPIO_UNUSED = 0xff
13};
14
15struct dcb_gpio_func {
16 u8 func;
17 u8 line;
18 u8 log[2];
19
20 /* so far, "param" seems to only have an influence on PWM-related
21 * GPIOs such as FAN_CONTROL and PANEL_BACKLIGHT_LEVEL.
22 * if param equals 1, hardware PWM is available
23 * if param equals 0, the host should toggle the GPIO itself
24 */
25 u8 param;
26};
27
28u16 dcb_gpio_table(struct nouveau_bios *);
29u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver);
30int dcb_gpio_parse(struct nouveau_bios *, int idx, u8 func, u8 line,
31 struct dcb_gpio_func *);
32
33#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
new file mode 100644
index 000000000000..5079bedfd985
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -0,0 +1,25 @@
1#ifndef __NVBIOS_I2C_H__
2#define __NVBIOS_I2C_H__
3
4struct nouveau_bios;
5
6enum dcb_i2c_type {
7 DCB_I2C_NV04_BIT = 0,
8 DCB_I2C_NV4E_BIT = 4,
9 DCB_I2C_NVIO_BIT = 5,
10 DCB_I2C_NVIO_AUX = 6,
11 DCB_I2C_UNUSED = 0xff
12};
13
14struct dcb_i2c_entry {
15 enum dcb_i2c_type type;
16 u8 drive;
17 u8 sense;
18 u32 data;
19};
20
21u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
22u16 dcb_i2c_entry(struct nouveau_bios *, u8 index, u8 *ver, u8 *len);
23int dcb_i2c_parse(struct nouveau_bios *, u8 index, struct dcb_i2c_entry *);
24
25#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
new file mode 100644
index 000000000000..e69a8bdc6e97
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
@@ -0,0 +1,21 @@
1#ifndef __NVBIOS_INIT_H__
2#define __NVBIOS_INIT_H__
3
4struct nvbios_init {
5 struct nouveau_subdev *subdev;
6 struct nouveau_bios *bios;
7 u16 offset;
8 struct dcb_output *outp;
9 int crtc;
10
11 /* internal state used during parsing */
12 u8 execute;
13 u32 nested;
14 u16 repeat;
15 u16 repend;
16};
17
18int nvbios_exec(struct nvbios_init *);
19int nvbios_init(struct nouveau_subdev *, bool execute);
20
21#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h
new file mode 100644
index 000000000000..5572e60414e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h
@@ -0,0 +1,9 @@
1#ifndef __NVBIOS_MXM_H__
2#define __NVBIOS_MXM_H__
3
4u16 mxm_table(struct nouveau_bios *, u8 *ver, u8 *hdr);
5
6u8 mxm_sor_map(struct nouveau_bios *, u8 conn);
7u8 mxm_ddc_map(struct nouveau_bios *, u8 port);
8
9#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
new file mode 100644
index 000000000000..0b285e99be5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
@@ -0,0 +1,14 @@
1#ifndef __NVBIOS_PERF_H__
2#define __NVBIOS_PERF_H__
3
4struct nouveau_bios;
5
6struct nvbios_perf_fan {
7 u32 pwm_divisor;
8};
9
10int
11nvbios_perf_fan_parse(struct nouveau_bios *, struct nvbios_perf_fan *);
12
13
14#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
new file mode 100644
index 000000000000..c345097592f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -0,0 +1,77 @@
1#ifndef __NVBIOS_PLL_H__
2#define __NVBIOS_PLL_H__
3
4/*XXX: kill me */
5struct nouveau_pll_vals {
6 union {
7 struct {
8#ifdef __BIG_ENDIAN
9 uint8_t N1, M1, N2, M2;
10#else
11 uint8_t M1, N1, M2, N2;
12#endif
13 };
14 struct {
15 uint16_t NM1, NM2;
16 } __attribute__((packed));
17 };
18 int log2P;
19
20 int refclk;
21};
22
23struct nouveau_bios;
24
25/* these match types in pll limits table version 0x40,
26 * nouveau uses them on all chipsets internally where a
27 * specific pll needs to be referenced, but the exact
28 * register isn't known.
29 */
30enum nvbios_pll_type {
31 PLL_CORE = 0x01,
32 PLL_SHADER = 0x02,
33 PLL_UNK03 = 0x03,
34 PLL_MEMORY = 0x04,
35 PLL_VDEC = 0x05,
36 PLL_UNK40 = 0x40,
37 PLL_UNK41 = 0x41,
38 PLL_UNK42 = 0x42,
39 PLL_VPLL0 = 0x80,
40 PLL_VPLL1 = 0x81,
41 PLL_MAX = 0xff
42};
43
44struct nvbios_pll {
45 enum nvbios_pll_type type;
46 u32 reg;
47 u32 refclk;
48
49 u8 min_p;
50 u8 max_p;
51 u8 bias_p;
52
53 /*
54 * for most pre nv50 cards setting a log2P of 7 (the common max_log2p
55 * value) is no different to 6 (at least for vplls) so allowing the MNP
56 * calc to use 7 causes the generated clock to be out by a factor of 2.
57 * however, max_log2p cannot be fixed-up during parsing as the
58 * unmodified max_log2p value is still needed for setting mplls, hence
59 * an additional max_usable_log2p member
60 */
61 u8 max_p_usable;
62
63 struct {
64 u32 min_freq;
65 u32 max_freq;
66 u32 min_inputfreq;
67 u32 max_inputfreq;
68 u8 min_m;
69 u8 max_m;
70 u8 min_n;
71 u8 max_n;
72 } vco1, vco2;
73};
74
75int nvbios_pll_parse(struct nouveau_bios *, u32 type, struct nvbios_pll *);
76
77#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
new file mode 100644
index 000000000000..a2c4296fc5f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -0,0 +1,46 @@
1#ifndef __NVBIOS_THERM_H__
2#define __NVBIOS_THERM_H__
3
4struct nouveau_bios;
5
6struct nvbios_therm_threshold {
7 u8 temp;
8 u8 hysteresis;
9};
10
11struct nvbios_therm_sensor {
12 /* diode */
13 s16 slope_mult;
14 s16 slope_div;
15 s16 offset_num;
16 s16 offset_den;
17 s8 offset_constant;
18
19 /* thresholds */
20 struct nvbios_therm_threshold thrs_fan_boost;
21 struct nvbios_therm_threshold thrs_down_clock;
22 struct nvbios_therm_threshold thrs_critical;
23 struct nvbios_therm_threshold thrs_shutdown;
24};
25
26struct nvbios_therm_fan {
27 u16 pwm_freq;
28
29 u8 min_duty;
30 u8 max_duty;
31};
32
33enum nvbios_therm_domain {
34 NVBIOS_THERM_DOMAIN_CORE,
35 NVBIOS_THERM_DOMAIN_AMBIENT,
36};
37
38int
39nvbios_therm_sensor_parse(struct nouveau_bios *, enum nvbios_therm_domain,
40 struct nvbios_therm_sensor *);
41
42int
43nvbios_therm_fan_parse(struct nouveau_bios *, struct nvbios_therm_fan *);
44
45
46#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
new file mode 100644
index 000000000000..39e73b91d360
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -0,0 +1,59 @@
1#ifndef __NOUVEAU_CLOCK_H__
2#define __NOUVEAU_CLOCK_H__
3
4#include <core/device.h>
5#include <core/subdev.h>
6
7struct nouveau_pll_vals;
8struct nvbios_pll;
9
10struct nouveau_clock {
11 struct nouveau_subdev base;
12
13 int (*pll_set)(struct nouveau_clock *, u32 type, u32 freq);
14
15 /*XXX: die, these are here *only* to support the completely
16 * bat-shit insane what-was-nouveau_hw.c code
17 */
18 int (*pll_calc)(struct nouveau_clock *, struct nvbios_pll *,
19 int clk, struct nouveau_pll_vals *pv);
20 int (*pll_prog)(struct nouveau_clock *, u32 reg1,
21 struct nouveau_pll_vals *pv);
22};
23
24static inline struct nouveau_clock *
25nouveau_clock(void *obj)
26{
27 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK];
28}
29
30#define nouveau_clock_create(p,e,o,d) \
31 nouveau_subdev_create((p), (e), (o), 0, "CLOCK", "clock", d)
32#define nouveau_clock_destroy(p) \
33 nouveau_subdev_destroy(&(p)->base)
34#define nouveau_clock_init(p) \
35 nouveau_subdev_init(&(p)->base)
36#define nouveau_clock_fini(p,s) \
37 nouveau_subdev_fini(&(p)->base, (s))
38
39int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
40 struct nouveau_oclass *, void *, u32, int, void **);
41
42#define _nouveau_clock_dtor _nouveau_subdev_dtor
43#define _nouveau_clock_init _nouveau_subdev_init
44#define _nouveau_clock_fini _nouveau_subdev_fini
45
46extern struct nouveau_oclass nv04_clock_oclass;
47extern struct nouveau_oclass nv40_clock_oclass;
48extern struct nouveau_oclass nv50_clock_oclass;
49extern struct nouveau_oclass nva3_clock_oclass;
50extern struct nouveau_oclass nvc0_clock_oclass;
51
52int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
53int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
54 int clk, struct nouveau_pll_vals *);
55int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
56 struct nouveau_pll_vals *);
57
58
59#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/device.h b/drivers/gpu/drm/nouveau/core/include/subdev/device.h
new file mode 100644
index 000000000000..c9e4c4afa50e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/device.h
@@ -0,0 +1,24 @@
1#ifndef __NOUVEAU_SUBDEV_DEVICE_H__
2#define __NOUVEAU_SUBDEV_DEVICE_H__
3
4#include <core/device.h>
5
6#define nouveau_device_create(p,n,s,c,d,u) \
7 nouveau_device_create_((p), (n), (s), (c), (d), sizeof(**u), (void **)u)
8
9int nouveau_device_create_(struct pci_dev *, u64 name, const char *sname,
10 const char *cfg, const char *dbg, int, void **);
11
12int nv04_identify(struct nouveau_device *);
13int nv10_identify(struct nouveau_device *);
14int nv20_identify(struct nouveau_device *);
15int nv30_identify(struct nouveau_device *);
16int nv40_identify(struct nouveau_device *);
17int nv50_identify(struct nouveau_device *);
18int nvc0_identify(struct nouveau_device *);
19int nve0_identify(struct nouveau_device *);
20
21extern struct nouveau_oclass nouveau_device_sclass[];
22struct nouveau_device *nouveau_device_find(u64 name);
23
24#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
new file mode 100644
index 000000000000..29e4cc1f6cc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -0,0 +1,40 @@
1#ifndef __NOUVEAU_DEVINIT_H__
2#define __NOUVEAU_DEVINIT_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_devinit {
8 struct nouveau_subdev base;
9 bool post;
10 void (*meminit)(struct nouveau_devinit *);
11};
12
13static inline struct nouveau_devinit *
14nouveau_devinit(void *obj)
15{
16 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT];
17}
18
19#define nouveau_devinit_create(p,e,o,d) \
20 nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
21#define nouveau_devinit_destroy(p) \
22 nouveau_subdev_destroy(&(p)->base)
23
24int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
25 struct nouveau_oclass *, int, void **);
26int nouveau_devinit_init(struct nouveau_devinit *);
27int nouveau_devinit_fini(struct nouveau_devinit *, bool suspend);
28
29extern struct nouveau_oclass nv04_devinit_oclass;
30extern struct nouveau_oclass nv05_devinit_oclass;
31extern struct nouveau_oclass nv10_devinit_oclass;
32extern struct nouveau_oclass nv1a_devinit_oclass;
33extern struct nouveau_oclass nv20_devinit_oclass;
34extern struct nouveau_oclass nv50_devinit_oclass;
35
36void nv04_devinit_dtor(struct nouveau_object *);
37int nv04_devinit_init(struct nouveau_object *);
38int nv04_devinit_fini(struct nouveau_object *, bool);
39
40#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
new file mode 100644
index 000000000000..5c1b5e1904f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -0,0 +1,134 @@
1#ifndef __NOUVEAU_FB_H__
2#define __NOUVEAU_FB_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6#include <core/mm.h>
7
8#include <subdev/vm.h>
9
10/* memory type/access flags, do not match hardware values */
11#define NV_MEM_ACCESS_RO 1
12#define NV_MEM_ACCESS_WO 2
13#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
14#define NV_MEM_ACCESS_SYS 4
15#define NV_MEM_ACCESS_VM 8
16#define NV_MEM_ACCESS_NOSNOOP 16
17
18#define NV_MEM_TARGET_VRAM 0
19#define NV_MEM_TARGET_PCI 1
20#define NV_MEM_TARGET_PCI_NOSNOOP 2
21#define NV_MEM_TARGET_VM 3
22#define NV_MEM_TARGET_GART 4
23
24#define NV_MEM_TYPE_VM 0x7f
25#define NV_MEM_COMP_VM 0x03
26
27struct nouveau_mem {
28 struct drm_device *dev;
29
30 struct nouveau_vma bar_vma;
31 struct nouveau_vma vma[2];
32 u8 page_shift;
33
34 struct nouveau_mm_node *tag;
35 struct list_head regions;
36 dma_addr_t *pages;
37 u32 memtype;
38 u64 offset;
39 u64 size;
40 struct sg_table *sg;
41};
42
43struct nouveau_fb_tile {
44 struct nouveau_mm_node *tag;
45 u32 addr;
46 u32 limit;
47 u32 pitch;
48 u32 zcomp;
49};
50
51struct nouveau_fb {
52 struct nouveau_subdev base;
53
54 bool (*memtype_valid)(struct nouveau_fb *, u32 memtype);
55
56 struct {
57 enum {
58 NV_MEM_TYPE_UNKNOWN = 0,
59 NV_MEM_TYPE_STOLEN,
60 NV_MEM_TYPE_SGRAM,
61 NV_MEM_TYPE_SDRAM,
62 NV_MEM_TYPE_DDR1,
63 NV_MEM_TYPE_DDR2,
64 NV_MEM_TYPE_DDR3,
65 NV_MEM_TYPE_GDDR2,
66 NV_MEM_TYPE_GDDR3,
67 NV_MEM_TYPE_GDDR4,
68 NV_MEM_TYPE_GDDR5
69 } type;
70 u64 stolen;
71 u64 size;
72 int ranks;
73
74 int (*get)(struct nouveau_fb *, u64 size, u32 align,
75 u32 size_nc, u32 type, struct nouveau_mem **);
76 void (*put)(struct nouveau_fb *, struct nouveau_mem **);
77 } ram;
78
79 struct nouveau_mm vram;
80 struct nouveau_mm tags;
81
82 struct {
83 struct nouveau_fb_tile region[16];
84 int regions;
85 void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
86 u32 pitch, u32 flags, struct nouveau_fb_tile *);
87 void (*fini)(struct nouveau_fb *, int i,
88 struct nouveau_fb_tile *);
89 void (*prog)(struct nouveau_fb *, int i,
90 struct nouveau_fb_tile *);
91 } tile;
92};
93
94static inline struct nouveau_fb *
95nouveau_fb(void *obj)
96{
97 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
98}
99
100#define nouveau_fb_create(p,e,c,d) \
101 nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
102int nouveau_fb_created(struct nouveau_fb *);
103void nouveau_fb_destroy(struct nouveau_fb *);
104int nouveau_fb_init(struct nouveau_fb *);
105#define nouveau_fb_fini(p,s) \
106 nouveau_subdev_fini(&(p)->base, (s))
107
108void _nouveau_fb_dtor(struct nouveau_object *);
109int _nouveau_fb_init(struct nouveau_object *);
110#define _nouveau_fb_fini _nouveau_subdev_fini
111
112extern struct nouveau_oclass nv04_fb_oclass;
113extern struct nouveau_oclass nv10_fb_oclass;
114extern struct nouveau_oclass nv20_fb_oclass;
115extern struct nouveau_oclass nv30_fb_oclass;
116extern struct nouveau_oclass nv40_fb_oclass;
117extern struct nouveau_oclass nv50_fb_oclass;
118extern struct nouveau_oclass nvc0_fb_oclass;
119
120struct nouveau_bios;
121int nouveau_fb_bios_memtype(struct nouveau_bios *);
122
123bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
124
125void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
126
127void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
128 u32 pitch, u32 flags, struct nouveau_fb_tile *);
129void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
130
131void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
132void nv50_fb_trap(struct nouveau_fb *, int display);
133
134#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
new file mode 100644
index 000000000000..9ea2b12cc15d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -0,0 +1,64 @@
1#ifndef __NOUVEAU_GPIO_H__
2#define __NOUVEAU_GPIO_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7#include <subdev/bios.h>
8#include <subdev/bios/gpio.h>
9
10struct nouveau_gpio {
11 struct nouveau_subdev base;
12
13 /* hardware interfaces */
14 void (*reset)(struct nouveau_gpio *);
15 int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
16 int (*sense)(struct nouveau_gpio *, int line);
17 void (*irq_enable)(struct nouveau_gpio *, int line, bool);
18
19 /* software interfaces */
20 int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
21 struct dcb_gpio_func *);
22 int (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
23 int (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line);
24 int (*irq)(struct nouveau_gpio *, int idx, u8 tag, u8 line, bool on);
25
26 /* interrupt handling */
27 struct list_head isr;
28 spinlock_t lock;
29
30 void (*isr_run)(struct nouveau_gpio *, int idx, u32 mask);
31 int (*isr_add)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
32 void (*)(void *, int state), void *data);
33 void (*isr_del)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
34 void (*)(void *, int state), void *data);
35};
36
37static inline struct nouveau_gpio *
38nouveau_gpio(void *obj)
39{
40 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
41}
42
43#define nouveau_gpio_create(p,e,o,d) \
44 nouveau_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
45#define nouveau_gpio_destroy(p) \
46 nouveau_subdev_destroy(&(p)->base)
47#define nouveau_gpio_fini(p,s) \
48 nouveau_subdev_fini(&(p)->base, (s))
49
50int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
51 struct nouveau_oclass *, int, void **);
52int nouveau_gpio_init(struct nouveau_gpio *);
53
54extern struct nouveau_oclass nv10_gpio_oclass;
55extern struct nouveau_oclass nv50_gpio_oclass;
56extern struct nouveau_oclass nvd0_gpio_oclass;
57
58void nv50_gpio_dtor(struct nouveau_object *);
59int nv50_gpio_init(struct nouveau_object *);
60int nv50_gpio_fini(struct nouveau_object *, bool);
61void nv50_gpio_intr(struct nouveau_subdev *);
62void nv50_gpio_irq_enable(struct nouveau_gpio *, int line, bool);
63
64#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
new file mode 100644
index 000000000000..b93ab01e3785
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -0,0 +1,60 @@
1#ifndef __NOUVEAU_I2C_H__
2#define __NOUVEAU_I2C_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7#include <subdev/bios.h>
8#include <subdev/bios/i2c.h>
9
10#define NV_I2C_PORT(n) (0x00 + (n))
11#define NV_I2C_DEFAULT(n) (0x80 + (n))
12
13struct nouveau_i2c_port {
14 struct i2c_adapter adapter;
15 struct nouveau_i2c *i2c;
16 struct i2c_algo_bit_data bit;
17 struct list_head head;
18 u8 index;
19 u8 type;
20 u32 dcb;
21 u32 drive;
22 u32 sense;
23 u32 state;
24};
25
26struct nouveau_i2c {
27 struct nouveau_subdev base;
28
29 struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
30 int (*identify)(struct nouveau_i2c *, int index,
31 const char *what, struct i2c_board_info *,
32 bool (*match)(struct nouveau_i2c_port *,
33 struct i2c_board_info *));
34 struct list_head ports;
35};
36
37static inline struct nouveau_i2c *
38nouveau_i2c(void *obj)
39{
40 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
41}
42
43extern struct nouveau_oclass nouveau_i2c_oclass;
44
45void nouveau_i2c_drive_scl(void *, int);
46void nouveau_i2c_drive_sda(void *, int);
47int nouveau_i2c_sense_scl(void *);
48int nouveau_i2c_sense_sda(void *);
49
50int nv_rdi2cr(struct nouveau_i2c_port *, u8 addr, u8 reg);
51int nv_wri2cr(struct nouveau_i2c_port *, u8 addr, u8 reg, u8 val);
52bool nv_probe_i2c(struct nouveau_i2c_port *, u8 addr);
53
54int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
55int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
56
57extern const struct i2c_algorithm nouveau_i2c_bit_algo;
58extern const struct i2c_algorithm nouveau_i2c_aux_algo;
59
60#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
new file mode 100644
index 000000000000..88814f159d89
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
@@ -0,0 +1,34 @@
1#ifndef __NOUVEAU_IBUS_H__
2#define __NOUVEAU_IBUS_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_ibus {
8 struct nouveau_subdev base;
9};
10
11static inline struct nouveau_ibus *
12nouveau_ibus(void *obj)
13{
14 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_IBUS];
15}
16
17#define nouveau_ibus_create(p,e,o,d) \
18 nouveau_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus", \
19 sizeof(**d), (void **)d)
20#define nouveau_ibus_destroy(p) \
21 nouveau_subdev_destroy(&(p)->base)
22#define nouveau_ibus_init(p) \
23 nouveau_subdev_init(&(p)->base)
24#define nouveau_ibus_fini(p,s) \
25 nouveau_subdev_fini(&(p)->base, (s))
26
27#define _nouveau_ibus_dtor _nouveau_subdev_dtor
28#define _nouveau_ibus_init _nouveau_subdev_init
29#define _nouveau_ibus_fini _nouveau_subdev_fini
30
31extern struct nouveau_oclass nvc0_ibus_oclass;
32extern struct nouveau_oclass nve0_ibus_oclass;
33
34#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
new file mode 100644
index 000000000000..ec7a54e91a08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -0,0 +1,73 @@
1#ifndef __NOUVEAU_INSTMEM_H__
2#define __NOUVEAU_INSTMEM_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6#include <core/mm.h>
7
8struct nouveau_instobj {
9 struct nouveau_object base;
10 struct list_head head;
11 u32 *suspend;
12 u64 addr;
13 u32 size;
14};
15
16static inline struct nouveau_instobj *
17nv_memobj(void *obj)
18{
19#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
20 if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
21 nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
22#endif
23 return obj;
24}
25
26#define nouveau_instobj_create(p,e,o,d) \
27 nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
28#define nouveau_instobj_init(p) \
29 nouveau_object_init(&(p)->base)
30#define nouveau_instobj_fini(p,s) \
31 nouveau_object_fini(&(p)->base, (s))
32
33int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
34 struct nouveau_oclass *, int, void **);
35void nouveau_instobj_destroy(struct nouveau_instobj *);
36
37void _nouveau_instobj_dtor(struct nouveau_object *);
38#define _nouveau_instobj_init nouveau_object_init
39#define _nouveau_instobj_fini nouveau_object_fini
40
41struct nouveau_instmem {
42 struct nouveau_subdev base;
43 struct list_head list;
44
45 u32 reserved;
46 int (*alloc)(struct nouveau_instmem *, struct nouveau_object *,
47 u32 size, u32 align, struct nouveau_object **);
48};
49
50static inline struct nouveau_instmem *
51nouveau_instmem(void *obj)
52{
53 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
54}
55
56#define nouveau_instmem_create(p,e,o,d) \
57 nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
58#define nouveau_instmem_destroy(p) \
59 nouveau_subdev_destroy(&(p)->base)
60int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
61 struct nouveau_oclass *, int, void **);
62int nouveau_instmem_init(struct nouveau_instmem *);
63int nouveau_instmem_fini(struct nouveau_instmem *, bool);
64
65#define _nouveau_instmem_dtor _nouveau_subdev_dtor
66int _nouveau_instmem_init(struct nouveau_object *);
67int _nouveau_instmem_fini(struct nouveau_object *, bool);
68
69extern struct nouveau_oclass nv04_instmem_oclass;
70extern struct nouveau_oclass nv40_instmem_oclass;
71extern struct nouveau_oclass nv50_instmem_oclass;
72
73#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
new file mode 100644
index 000000000000..f351f63bc654
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
@@ -0,0 +1,33 @@
1#ifndef __NOUVEAU_LTCG_H__
2#define __NOUVEAU_LTCG_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_ltcg {
8 struct nouveau_subdev base;
9};
10
11static inline struct nouveau_ltcg *
12nouveau_ltcg(void *obj)
13{
14 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTCG];
15}
16
17#define nouveau_ltcg_create(p,e,o,d) \
18 nouveau_subdev_create_((p), (e), (o), 0, "PLTCG", "level2", \
19 sizeof(**d), (void **)d)
20#define nouveau_ltcg_destroy(p) \
21 nouveau_subdev_destroy(&(p)->base)
22#define nouveau_ltcg_init(p) \
23 nouveau_subdev_init(&(p)->base)
24#define nouveau_ltcg_fini(p,s) \
25 nouveau_subdev_fini(&(p)->base, (s))
26
27#define _nouveau_ltcg_dtor _nouveau_subdev_dtor
28#define _nouveau_ltcg_init _nouveau_subdev_init
29#define _nouveau_ltcg_fini _nouveau_subdev_fini
30
31extern struct nouveau_oclass nvc0_ltcg_oclass;
32
33#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
new file mode 100644
index 000000000000..fded97cea500
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -0,0 +1,49 @@
1#ifndef __NOUVEAU_MC_H__
2#define __NOUVEAU_MC_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_mc_intr {
8 u32 stat;
9 u32 unit;
10};
11
12struct nouveau_mc {
13 struct nouveau_subdev base;
14 const struct nouveau_mc_intr *intr_map;
15};
16
17static inline struct nouveau_mc *
18nouveau_mc(void *obj)
19{
20 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
21}
22
23#define nouveau_mc_create(p,e,o,d) \
24 nouveau_subdev_create_((p), (e), (o), 0, "PMC", "master", \
25 sizeof(**d), (void **)d)
26#define nouveau_mc_destroy(p) \
27 nouveau_subdev_destroy(&(p)->base)
28#define nouveau_mc_init(p) \
29 nouveau_subdev_init(&(p)->base)
30#define nouveau_mc_fini(p,s) \
31 nouveau_subdev_fini(&(p)->base, (s))
32
33#define _nouveau_mc_dtor _nouveau_subdev_dtor
34#define _nouveau_mc_init _nouveau_subdev_init
35#define _nouveau_mc_fini _nouveau_subdev_fini
36
37extern struct nouveau_oclass nv04_mc_oclass;
38extern struct nouveau_oclass nv44_mc_oclass;
39extern struct nouveau_oclass nv50_mc_oclass;
40extern struct nouveau_oclass nv98_mc_oclass;
41extern struct nouveau_oclass nvc0_mc_oclass;
42
43void nouveau_mc_intr(struct nouveau_subdev *);
44
45extern const struct nouveau_mc_intr nv04_mc_intr[];
46int nv04_mc_init(struct nouveau_object *);
47int nv50_mc_init(struct nouveau_object *);
48
49#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h b/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h
new file mode 100644
index 000000000000..b93b152cb566
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h
@@ -0,0 +1,37 @@
1#ifndef __NOUVEAU_MXM_H__
2#define __NOUVEAU_MXM_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7#define MXM_SANITISE_DCB 0x00000001
8
9struct nouveau_mxm {
10 struct nouveau_subdev base;
11 u32 action;
12 u8 *mxms;
13};
14
15static inline struct nouveau_mxm *
16nouveau_mxm(void *obj)
17{
18 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MXM];
19}
20
21#define nouveau_mxm_create(p,e,o,d) \
22 nouveau_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
23#define nouveau_mxm_init(p) \
24 nouveau_subdev_init(&(p)->base)
25#define nouveau_mxm_fini(p,s) \
26 nouveau_subdev_fini(&(p)->base, (s))
27int nouveau_mxm_create_(struct nouveau_object *, struct nouveau_object *,
28 struct nouveau_oclass *, int, void **);
29void nouveau_mxm_destroy(struct nouveau_mxm *);
30
31#define _nouveau_mxm_dtor _nouveau_subdev_dtor
32#define _nouveau_mxm_init _nouveau_subdev_init
33#define _nouveau_mxm_fini _nouveau_subdev_fini
34
35extern struct nouveau_oclass nv50_mxm_oclass;
36
37#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
new file mode 100644
index 000000000000..faee569fd458
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -0,0 +1,58 @@
1#ifndef __NOUVEAU_THERM_H__
2#define __NOUVEAU_THERM_H__
3
4#include <core/device.h>
5#include <core/subdev.h>
6
7enum nouveau_therm_fan_mode {
8 FAN_CONTROL_NONE = 0,
9 FAN_CONTROL_MANUAL = 1,
10 FAN_CONTROL_NR,
11};
12
13enum nouveau_therm_attr_type {
14 NOUVEAU_THERM_ATTR_FAN_MIN_DUTY = 0,
15 NOUVEAU_THERM_ATTR_FAN_MAX_DUTY = 1,
16 NOUVEAU_THERM_ATTR_FAN_MODE = 2,
17
18 NOUVEAU_THERM_ATTR_THRS_FAN_BOOST = 10,
19 NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST = 11,
20 NOUVEAU_THERM_ATTR_THRS_DOWN_CLK = 12,
21 NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST = 13,
22 NOUVEAU_THERM_ATTR_THRS_CRITICAL = 14,
23 NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST = 15,
24 NOUVEAU_THERM_ATTR_THRS_SHUTDOWN = 16,
25 NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
26};
27
28struct nouveau_therm {
29 struct nouveau_subdev base;
30
31 int (*fan_get)(struct nouveau_therm *);
32 int (*fan_set)(struct nouveau_therm *, int);
33 int (*fan_sense)(struct nouveau_therm *);
34
35 int (*temp_get)(struct nouveau_therm *);
36
37 int (*attr_get)(struct nouveau_therm *, enum nouveau_therm_attr_type);
38 int (*attr_set)(struct nouveau_therm *,
39 enum nouveau_therm_attr_type, int);
40};
41
42static inline struct nouveau_therm *
43nouveau_therm(void *obj)
44{
45 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_THERM];
46}
47
48#define nouveau_therm_create(p,e,o,d) \
49 nouveau_subdev_create((p), (e), (o), 0, "THERM", "therm", d)
50#define nouveau_therm_destroy(p) \
51 nouveau_subdev_destroy(&(p)->base)
52
53#define _nouveau_therm_dtor _nouveau_subdev_dtor
54
55extern struct nouveau_oclass nv40_therm_oclass;
56extern struct nouveau_oclass nv50_therm_oclass;
57
58#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
new file mode 100644
index 000000000000..49bff901544c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -0,0 +1,53 @@
1#ifndef __NOUVEAU_TIMER_H__
2#define __NOUVEAU_TIMER_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_alarm {
8 struct list_head head;
9 u64 timestamp;
10 void (*func)(struct nouveau_alarm *);
11};
12
13bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
14bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
15bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
16void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *);
17
18#define NV_WAIT_DEFAULT 2000000000ULL
19#define nv_wait(o,a,m,v) \
20 nouveau_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
21#define nv_wait_ne(o,a,m,v) \
22 nouveau_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
23#define nv_wait_cb(o,c,d) \
24 nouveau_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
25
26struct nouveau_timer {
27 struct nouveau_subdev base;
28 u64 (*read)(struct nouveau_timer *);
29 void (*alarm)(struct nouveau_timer *, u32 time, struct nouveau_alarm *);
30};
31
32static inline struct nouveau_timer *
33nouveau_timer(void *obj)
34{
35 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_TIMER];
36}
37
38#define nouveau_timer_create(p,e,o,d) \
39 nouveau_subdev_create_((p), (e), (o), 0, "PTIMER", "timer", \
40 sizeof(**d), (void **)d)
41#define nouveau_timer_destroy(p) \
42 nouveau_subdev_destroy(&(p)->base)
43#define nouveau_timer_init(p) \
44 nouveau_subdev_init(&(p)->base)
45#define nouveau_timer_fini(p,s) \
46 nouveau_subdev_fini(&(p)->base, (s))
47
48int nouveau_timer_create_(struct nouveau_object *, struct nouveau_engine *,
49 struct nouveau_oclass *, int size, void **);
50
51extern struct nouveau_oclass nv04_timer_oclass;
52
53#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vga.h b/drivers/gpu/drm/nouveau/core/include/subdev/vga.h
new file mode 100644
index 000000000000..fee09ad818e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vga.h
@@ -0,0 +1,30 @@
1#ifndef __NOUVEAU_VGA_H__
2#define __NOUVEAU_VGA_H__
3
4#include <core/os.h>
5
6/* access to various legacy io ports */
7u8 nv_rdport(void *obj, int head, u16 port);
8void nv_wrport(void *obj, int head, u16 port, u8 value);
9
10/* VGA Sequencer */
11u8 nv_rdvgas(void *obj, int head, u8 index);
12void nv_wrvgas(void *obj, int head, u8 index, u8 value);
13
14/* VGA Graphics */
15u8 nv_rdvgag(void *obj, int head, u8 index);
16void nv_wrvgag(void *obj, int head, u8 index, u8 value);
17
18/* VGA CRTC */
19u8 nv_rdvgac(void *obj, int head, u8 index);
20void nv_wrvgac(void *obj, int head, u8 index, u8 value);
21
22/* VGA indexed port access dispatcher */
23u8 nv_rdvgai(void *obj, int head, u16 port, u8 index);
24void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value);
25
26bool nv_lockvgac(void *obj, bool lock);
27u8 nv_rdvgaowner(void *obj);
28void nv_wrvgaowner(void *obj, u8);
29
30#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index a8246e7e4a89..9d595efe667a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -25,10 +25,10 @@
25#ifndef __NOUVEAU_VM_H__ 25#ifndef __NOUVEAU_VM_H__
26#define __NOUVEAU_VM_H__ 26#define __NOUVEAU_VM_H__
27 27
28#include "drmP.h" 28#include <core/object.h>
29 29#include <core/subdev.h>
30#include "nouveau_drv.h" 30#include <core/device.h>
31#include "nouveau_mm.h" 31#include <core/mm.h>
32 32
33struct nouveau_vm_pgt { 33struct nouveau_vm_pgt {
34 struct nouveau_gpuobj *obj[2]; 34 struct nouveau_gpuobj *obj[2];
@@ -40,6 +40,9 @@ struct nouveau_vm_pgd {
40 struct nouveau_gpuobj *obj; 40 struct nouveau_gpuobj *obj;
41}; 41};
42 42
43struct nouveau_gpuobj;
44struct nouveau_mem;
45
43struct nouveau_vma { 46struct nouveau_vma {
44 struct list_head head; 47 struct list_head head;
45 int refcount; 48 int refcount;
@@ -50,21 +53,30 @@ struct nouveau_vma {
50}; 53};
51 54
52struct nouveau_vm { 55struct nouveau_vm {
53 struct drm_device *dev; 56 struct nouveau_vmmgr *vmm;
54 struct nouveau_mm mm; 57 struct nouveau_mm mm;
55 int refcount; 58 int refcount;
56 59
57 struct list_head pgd_list; 60 struct list_head pgd_list;
58 atomic_t engref[16]; 61 atomic_t engref[64]; //NVDEV_SUBDEV_NR];
59 62
60 struct nouveau_vm_pgt *pgt; 63 struct nouveau_vm_pgt *pgt;
61 u32 fpde; 64 u32 fpde;
62 u32 lpde; 65 u32 lpde;
66};
67
68struct nouveau_vmmgr {
69 struct nouveau_subdev base;
63 70
71 u64 limit;
72 u8 dma_bits;
64 u32 pgt_bits; 73 u32 pgt_bits;
65 u8 spg_shift; 74 u8 spg_shift;
66 u8 lpg_shift; 75 u8 lpg_shift;
67 76
77 int (*create)(struct nouveau_vmmgr *, u64 offset, u64 length,
78 u64 mm_offset, struct nouveau_vm **);
79
68 void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde, 80 void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
69 struct nouveau_gpuobj *pgt[2]); 81 struct nouveau_gpuobj *pgt[2]);
70 void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *, 82 void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
@@ -72,16 +84,47 @@ struct nouveau_vm {
72 u64 phys, u64 delta); 84 u64 phys, u64 delta);
73 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, 85 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
74 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *); 86 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
75
76 void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
77 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
78 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); 87 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
79 void (*flush)(struct nouveau_vm *); 88 void (*flush)(struct nouveau_vm *);
80}; 89};
81 90
82/* nouveau_vm.c */ 91static inline struct nouveau_vmmgr *
83int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset, 92nouveau_vmmgr(void *obj)
93{
94 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VM];
95}
96
97#define nouveau_vmmgr_create(p,e,o,i,f,d) \
98 nouveau_subdev_create((p), (e), (o), 0, (i), (f), (d))
99#define nouveau_vmmgr_destroy(p) \
100 nouveau_subdev_destroy(&(p)->base)
101#define nouveau_vmmgr_init(p) \
102 nouveau_subdev_init(&(p)->base)
103#define nouveau_vmmgr_fini(p,s) \
104 nouveau_subdev_fini(&(p)->base, (s))
105
106#define _nouveau_vmmgr_dtor _nouveau_subdev_dtor
107#define _nouveau_vmmgr_init _nouveau_subdev_init
108#define _nouveau_vmmgr_fini _nouveau_subdev_fini
109
110extern struct nouveau_oclass nv04_vmmgr_oclass;
111extern struct nouveau_oclass nv41_vmmgr_oclass;
112extern struct nouveau_oclass nv44_vmmgr_oclass;
113extern struct nouveau_oclass nv50_vmmgr_oclass;
114extern struct nouveau_oclass nvc0_vmmgr_oclass;
115
116int nv04_vm_create(struct nouveau_vmmgr *, u64, u64, u64,
84 struct nouveau_vm **); 117 struct nouveau_vm **);
118void nv04_vmmgr_dtor(struct nouveau_object *);
119
120void nv50_vm_flush_engine(struct nouveau_subdev *, int engine);
121void nvc0_vm_flush_engine(struct nouveau_subdev *, u64 addr, int type);
122
123/* nouveau_vm.c */
124int nouveau_vm_create(struct nouveau_vmmgr *, u64 offset, u64 length,
125 u64 mm_offset, u32 block, struct nouveau_vm **);
126int nouveau_vm_new(struct nouveau_device *, u64 offset, u64 length,
127 u64 mm_offset, struct nouveau_vm **);
85int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **, 128int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
86 struct nouveau_gpuobj *pgd); 129 struct nouveau_gpuobj *pgd);
87int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift, 130int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
@@ -94,26 +137,6 @@ void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
94void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, 137void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
95 struct nouveau_mem *); 138 struct nouveau_mem *);
96void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length, 139void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
97 struct nouveau_mem *mem); 140 struct nouveau_mem *mem);
98/* nv50_vm.c */
99void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
100 struct nouveau_gpuobj *pgt[2]);
101void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
102 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
103void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
104 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
105void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
106void nv50_vm_flush(struct nouveau_vm *);
107void nv50_vm_flush_engine(struct drm_device *, int engine);
108
109/* nvc0_vm.c */
110void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
111 struct nouveau_gpuobj *pgt[2]);
112void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
113 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
114void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
115 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
116void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
117void nvc0_vm_flush(struct nouveau_vm *);
118 141
119#endif 142#endif
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
new file mode 100644
index 000000000000..cfe3b9cad156
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -0,0 +1,47 @@
1#ifndef __NOUVEAU_OS_H__
2#define __NOUVEAU_OS_H__
3
4#include <linux/types.h>
5#include <linux/slab.h>
6#include <linux/mutex.h>
7#include <linux/pci.h>
8#include <linux/printk.h>
9#include <linux/bitops.h>
10#include <linux/firmware.h>
11#include <linux/module.h>
12#include <linux/i2c.h>
13#include <linux/i2c-algo-bit.h>
14#include <linux/delay.h>
15#include <linux/io-mapping.h>
16#include <linux/vmalloc.h>
17#include <linux/acpi.h>
18#include <linux/dmi.h>
19
20#include <asm/unaligned.h>
21
22static inline int
23ffsll(u64 mask)
24{
25 int i;
26 for (i = 0; i < 64; i++) {
27 if (mask & (1ULL << i))
28 return i + 1;
29 }
30 return 0;
31}
32
33#ifndef ioread32_native
34#ifdef __BIG_ENDIAN
35#define ioread16_native ioread16be
36#define iowrite16_native iowrite16be
37#define ioread32_native ioread32be
38#define iowrite32_native iowrite32be
39#else /* def __BIG_ENDIAN */
40#define ioread16_native ioread16
41#define iowrite16_native iowrite16
42#define ioread32_native ioread32
43#define iowrite32_native iowrite32
44#endif /* def __BIG_ENDIAN else */
45#endif /* !ioread32_native */
46
47#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
new file mode 100644
index 000000000000..cd01c533007a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <subdev/bar.h>
27
28struct nouveau_barobj {
29 struct nouveau_object base;
30 struct nouveau_vma vma;
31 void __iomem *iomem;
32};
33
34static int
35nouveau_barobj_ctor(struct nouveau_object *parent,
36 struct nouveau_object *engine,
37 struct nouveau_oclass *oclass, void *mem, u32 size,
38 struct nouveau_object **pobject)
39{
40 struct nouveau_bar *bar = (void *)engine;
41 struct nouveau_barobj *barobj;
42 int ret;
43
44 ret = nouveau_object_create(parent, engine, oclass, 0, &barobj);
45 *pobject = nv_object(barobj);
46 if (ret)
47 return ret;
48
49 ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
50 if (ret)
51 return ret;
52
53 barobj->iomem = bar->iomem + (u32)barobj->vma.offset;
54 return 0;
55}
56
57static void
58nouveau_barobj_dtor(struct nouveau_object *object)
59{
60 struct nouveau_bar *bar = (void *)object->engine;
61 struct nouveau_barobj *barobj = (void *)object;
62 if (barobj->vma.node)
63 bar->unmap(bar, &barobj->vma);
64 nouveau_object_destroy(&barobj->base);
65}
66
67static u32
68nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
69{
70 struct nouveau_barobj *barobj = (void *)object;
71 return ioread32_native(barobj->iomem + addr);
72}
73
74static void
75nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
76{
77 struct nouveau_barobj *barobj = (void *)object;
78 iowrite32_native(data, barobj->iomem + addr);
79}
80
81static struct nouveau_oclass
82nouveau_barobj_oclass = {
83 .ofuncs = &(struct nouveau_ofuncs) {
84 .ctor = nouveau_barobj_ctor,
85 .dtor = nouveau_barobj_dtor,
86 .init = nouveau_object_init,
87 .fini = nouveau_object_fini,
88 .rd32 = nouveau_barobj_rd32,
89 .wr32 = nouveau_barobj_wr32,
90 },
91};
92
93int
94nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
95 struct nouveau_mem *mem, struct nouveau_object **pobject)
96{
97 struct nouveau_object *engine = nv_object(bar);
98 return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
99 mem, 0, pobject);
100}
101
102int
103nouveau_bar_create_(struct nouveau_object *parent,
104 struct nouveau_object *engine,
105 struct nouveau_oclass *oclass, int length, void **pobject)
106{
107 struct nouveau_device *device = nv_device(parent);
108 struct nouveau_bar *bar;
109 int ret;
110
111 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL",
112 "bar", length, pobject);
113 bar = *pobject;
114 if (ret)
115 return ret;
116
117 bar->iomem = ioremap(pci_resource_start(device->pdev, 3),
118 pci_resource_len(device->pdev, 3));
119 return 0;
120}
121
122void
123nouveau_bar_destroy(struct nouveau_bar *bar)
124{
125 if (bar->iomem)
126 iounmap(bar->iomem);
127 nouveau_subdev_destroy(&bar->base);
128}
129
130void
131_nouveau_bar_dtor(struct nouveau_object *object)
132{
133 struct nouveau_bar *bar = (void *)object;
134 nouveau_bar_destroy(bar);
135}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
new file mode 100644
index 000000000000..c3acf5b70d9e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -0,0 +1,263 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include <subdev/timer.h>
28#include <subdev/bar.h>
29#include <subdev/fb.h>
30#include <subdev/vm.h>
31
32struct nv50_bar_priv {
33 struct nouveau_bar base;
34 spinlock_t lock;
35 struct nouveau_gpuobj *mem;
36 struct nouveau_gpuobj *pad;
37 struct nouveau_gpuobj *pgd;
38 struct nouveau_vm *bar1_vm;
39 struct nouveau_gpuobj *bar1;
40 struct nouveau_vm *bar3_vm;
41 struct nouveau_gpuobj *bar3;
42};
43
44static int
45nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
46 u32 flags, struct nouveau_vma *vma)
47{
48 struct nv50_bar_priv *priv = (void *)bar;
49 int ret;
50
51 ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
52 if (ret)
53 return ret;
54
55 nouveau_vm_map(vma, mem);
56 nv50_vm_flush_engine(nv_subdev(bar), 6);
57 return 0;
58}
59
60static int
61nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
62 u32 flags, struct nouveau_vma *vma)
63{
64 struct nv50_bar_priv *priv = (void *)bar;
65 int ret;
66
67 ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
68 if (ret)
69 return ret;
70
71 nouveau_vm_map(vma, mem);
72 nv50_vm_flush_engine(nv_subdev(bar), 6);
73 return 0;
74}
75
76static void
77nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
78{
79 nouveau_vm_unmap(vma);
80 nv50_vm_flush_engine(nv_subdev(bar), 6);
81 nouveau_vm_put(vma);
82}
83
84static void
85nv50_bar_flush(struct nouveau_bar *bar)
86{
87 struct nv50_bar_priv *priv = (void *)bar;
88 unsigned long flags;
89 spin_lock_irqsave(&priv->lock, flags);
90 nv_wr32(priv, 0x00330c, 0x00000001);
91 if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
92 nv_warn(priv, "flush timeout\n");
93 spin_unlock_irqrestore(&priv->lock, flags);
94}
95
96void
97nv84_bar_flush(struct nouveau_bar *bar)
98{
99 struct nv50_bar_priv *priv = (void *)bar;
100 unsigned long flags;
101 spin_lock_irqsave(&priv->lock, flags);
102 nv_wr32(bar, 0x070000, 0x00000001);
103 if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
104 nv_warn(priv, "flush timeout\n");
105 spin_unlock_irqrestore(&priv->lock, flags);
106}
107
108static int
109nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
110 struct nouveau_oclass *oclass, void *data, u32 size,
111 struct nouveau_object **pobject)
112{
113 struct nouveau_device *device = nv_device(parent);
114 struct nouveau_object *heap;
115 struct nouveau_vm *vm;
116 struct nv50_bar_priv *priv;
117 u64 start, limit;
118 int ret;
119
120 ret = nouveau_bar_create(parent, engine, oclass, &priv);
121 *pobject = nv_object(priv);
122 if (ret)
123 return ret;
124
125 ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, NVOBJ_FLAG_HEAP,
126 &priv->mem);
127 heap = nv_object(priv->mem);
128 if (ret)
129 return ret;
130
131 ret = nouveau_gpuobj_new(parent, heap, (device->chipset == 0x50) ?
132 0x1400 : 0x0200, 0, 0, &priv->pad);
133 if (ret)
134 return ret;
135
136 ret = nouveau_gpuobj_new(parent, heap, 0x4000, 0, 0, &priv->pgd);
137 if (ret)
138 return ret;
139
140 /* BAR3 */
141 start = 0x0100000000ULL;
142 limit = start + pci_resource_len(device->pdev, 3);
143
144 ret = nouveau_vm_new(device, start, limit, start, &vm);
145 if (ret)
146 return ret;
147
148 ret = nouveau_gpuobj_new(parent, heap, ((limit-- - start) >> 12) * 8,
149 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
150 &vm->pgt[0].obj[0]);
151 vm->pgt[0].refcount[0] = 1;
152 if (ret)
153 return ret;
154
155 ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
156 nouveau_vm_ref(NULL, &vm, NULL);
157 if (ret)
158 return ret;
159
160 ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar3);
161 if (ret)
162 return ret;
163
164 nv_wo32(priv->bar3, 0x00, 0x7fc00000);
165 nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
166 nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
167 nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
168 upper_32_bits(start));
169 nv_wo32(priv->bar3, 0x10, 0x00000000);
170 nv_wo32(priv->bar3, 0x14, 0x00000000);
171
172 /* BAR1 */
173 start = 0x0000000000ULL;
174 limit = start + pci_resource_len(device->pdev, 1);
175
176 ret = nouveau_vm_new(device, start, limit--, start, &vm);
177 if (ret)
178 return ret;
179
180 ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
181 nouveau_vm_ref(NULL, &vm, NULL);
182 if (ret)
183 return ret;
184
185 ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar1);
186 if (ret)
187 return ret;
188
189 nv_wo32(priv->bar1, 0x00, 0x7fc00000);
190 nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
191 nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
192 nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
193 upper_32_bits(start));
194 nv_wo32(priv->bar1, 0x10, 0x00000000);
195 nv_wo32(priv->bar1, 0x14, 0x00000000);
196
197 priv->base.alloc = nouveau_bar_alloc;
198 priv->base.kmap = nv50_bar_kmap;
199 priv->base.umap = nv50_bar_umap;
200 priv->base.unmap = nv50_bar_unmap;
201 if (device->chipset == 0x50)
202 priv->base.flush = nv50_bar_flush;
203 else
204 priv->base.flush = nv84_bar_flush;
205 spin_lock_init(&priv->lock);
206 return 0;
207}
208
209static void
210nv50_bar_dtor(struct nouveau_object *object)
211{
212 struct nv50_bar_priv *priv = (void *)object;
213 nouveau_gpuobj_ref(NULL, &priv->bar1);
214 nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
215 nouveau_gpuobj_ref(NULL, &priv->bar3);
216 if (priv->bar3_vm) {
217 nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
218 nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
219 }
220 nouveau_gpuobj_ref(NULL, &priv->pgd);
221 nouveau_gpuobj_ref(NULL, &priv->pad);
222 nouveau_gpuobj_ref(NULL, &priv->mem);
223 nouveau_bar_destroy(&priv->base);
224}
225
226static int
227nv50_bar_init(struct nouveau_object *object)
228{
229 struct nv50_bar_priv *priv = (void *)object;
230 int ret;
231
232 ret = nouveau_bar_init(&priv->base);
233 if (ret)
234 return ret;
235
236 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
237 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
238 nv50_vm_flush_engine(nv_subdev(priv), 6);
239
240 nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
241 nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
242 nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
243 nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
244 return 0;
245}
246
247static int
248nv50_bar_fini(struct nouveau_object *object, bool suspend)
249{
250 struct nv50_bar_priv *priv = (void *)object;
251 return nouveau_bar_fini(&priv->base, suspend);
252}
253
254struct nouveau_oclass
255nv50_bar_oclass = {
256 .handle = NV_SUBDEV(BAR, 0x50),
257 .ofuncs = &(struct nouveau_ofuncs) {
258 .ctor = nv50_bar_ctor,
259 .dtor = nv50_bar_dtor,
260 .init = nv50_bar_init,
261 .fini = nv50_bar_fini,
262 },
263};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
new file mode 100644
index 000000000000..77a6fb725d3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include <subdev/timer.h>
28#include <subdev/bar.h>
29#include <subdev/fb.h>
30#include <subdev/vm.h>
31
32struct nvc0_bar_priv {
33 struct nouveau_bar base;
34 spinlock_t lock;
35 struct {
36 struct nouveau_gpuobj *mem;
37 struct nouveau_gpuobj *pgd;
38 struct nouveau_vm *vm;
39 } bar[2];
40};
41
42static int
43nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
44 u32 flags, struct nouveau_vma *vma)
45{
46 struct nvc0_bar_priv *priv = (void *)bar;
47 int ret;
48
49 ret = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
50 if (ret)
51 return ret;
52
53 nouveau_vm_map(vma, mem);
54 nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5);
55 return 0;
56}
57
58static int
59nvc0_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
60 u32 flags, struct nouveau_vma *vma)
61{
62 struct nvc0_bar_priv *priv = (void *)bar;
63 int ret;
64
65 ret = nouveau_vm_get(priv->bar[1].vm, mem->size << 12,
66 mem->page_shift, flags, vma);
67 if (ret)
68 return ret;
69
70 nouveau_vm_map(vma, mem);
71 nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[1].pgd->addr, 5);
72 return 0;
73}
74
75static void
76nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
77{
78 struct nvc0_bar_priv *priv = (void *)bar;
79 int i = !(vma->vm == priv->bar[0].vm);
80
81 nouveau_vm_unmap(vma);
82 nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5);
83 nouveau_vm_put(vma);
84}
85
86static int
87nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
88 struct nouveau_oclass *oclass, void *data, u32 size,
89 struct nouveau_object **pobject)
90{
91 struct nouveau_device *device = nv_device(parent);
92 struct pci_dev *pdev = device->pdev;
93 struct nvc0_bar_priv *priv;
94 struct nouveau_gpuobj *mem;
95 struct nouveau_vm *vm;
96 int ret;
97
98 ret = nouveau_bar_create(parent, engine, oclass, &priv);
99 *pobject = nv_object(priv);
100 if (ret)
101 return ret;
102
103 /* BAR3 */
104 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem);
105 mem = priv->bar[0].mem;
106 if (ret)
107 return ret;
108
109 ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd);
110 if (ret)
111 return ret;
112
113 ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm);
114 if (ret)
115 return ret;
116
117 ret = nouveau_gpuobj_new(parent, NULL,
118 (pci_resource_len(pdev, 3) >> 12) * 8,
119 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
120 &vm->pgt[0].obj[0]);
121 vm->pgt[0].refcount[0] = 1;
122 if (ret)
123 return ret;
124
125 ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
126 nouveau_vm_ref(NULL, &vm, NULL);
127 if (ret)
128 return ret;
129
130 nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
131 nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
132 nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1));
133 nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
134
135 /* BAR1 */
136 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem);
137 mem = priv->bar[1].mem;
138 if (ret)
139 return ret;
140
141 ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd);
142 if (ret)
143 return ret;
144
145 ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm);
146 if (ret)
147 return ret;
148
149 ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
150 nouveau_vm_ref(NULL, &vm, NULL);
151 if (ret)
152 return ret;
153
154 nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
155 nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
156 nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1));
157 nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1));
158
159 priv->base.alloc = nouveau_bar_alloc;
160 priv->base.kmap = nvc0_bar_kmap;
161 priv->base.umap = nvc0_bar_umap;
162 priv->base.unmap = nvc0_bar_unmap;
163 priv->base.flush = nv84_bar_flush;
164 spin_lock_init(&priv->lock);
165 return 0;
166}
167
168static void
169nvc0_bar_dtor(struct nouveau_object *object)
170{
171 struct nvc0_bar_priv *priv = (void *)object;
172
173 nouveau_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
174 nouveau_gpuobj_ref(NULL, &priv->bar[1].pgd);
175 nouveau_gpuobj_ref(NULL, &priv->bar[1].mem);
176
177 if (priv->bar[0].vm) {
178 nouveau_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
179 nouveau_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
180 }
181 nouveau_gpuobj_ref(NULL, &priv->bar[0].pgd);
182 nouveau_gpuobj_ref(NULL, &priv->bar[0].mem);
183
184 nouveau_bar_destroy(&priv->base);
185}
186
187static int
188nvc0_bar_init(struct nouveau_object *object)
189{
190 struct nvc0_bar_priv *priv = (void *)object;
191 int ret;
192
193 ret = nouveau_bar_init(&priv->base);
194 if (ret)
195 return ret;
196
197 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
198 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
199 nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
200
201 nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
202 nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12);
203 return 0;
204}
205
206struct nouveau_oclass
207nvc0_bar_oclass = {
208 .handle = NV_SUBDEV(BAR, 0xc0),
209 .ofuncs = &(struct nouveau_ofuncs) {
210 .ctor = nvc0_bar_ctor,
211 .dtor = nvc0_bar_dtor,
212 .init = nvc0_bar_init,
213 .fini = _nouveau_bar_fini,
214 },
215};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
new file mode 100644
index 000000000000..2fbb6df697cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -0,0 +1,479 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/device.h>
27#include <core/subdev.h>
28#include <core/option.h>
29
30#include <subdev/bios.h>
31#include <subdev/bios/bmp.h>
32#include <subdev/bios/bit.h>
33
34u8
35nvbios_checksum(const u8 *data, int size)
36{
37 u8 sum = 0;
38 while (size--)
39 sum += *data++;
40 return sum;
41}
42
43u16
44nvbios_findstr(const u8 *data, int size, const char *str, int len)
45{
46 int i, j;
47
48 for (i = 0; i <= (size - len); i++) {
49 for (j = 0; j < len; j++)
50 if ((char)data[i + j] != str[j])
51 break;
52 if (j == len)
53 return i;
54 }
55
56 return 0;
57}
58
59#if defined(__powerpc__)
60static void
61nouveau_bios_shadow_of(struct nouveau_bios *bios)
62{
63 struct pci_dev *pdev = nv_device(bios)->pdev;
64 struct device_node *dn;
65 const u32 *data;
66 int size, i;
67
68 dn = pci_device_to_OF_node(pdev);
69 if (!dn) {
70 nv_info(bios, "Unable to get the OF node\n");
71 return;
72 }
73
74 data = of_get_property(dn, "NVDA,BMP", &size);
75 if (data) {
76 bios->size = size;
77 bios->data = kmalloc(bios->size, GFP_KERNEL);
78 if (bios->data)
79 memcpy(bios->data, data, size);
80 }
81}
82#endif
83
84static void
85nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
86{
87 struct nouveau_device *device = nv_device(bios);
88 u32 bar0 = 0;
89 int i;
90
91 if (device->card_type >= NV_50) {
92 u64 addr = (u64)(nv_rd32(bios, 0x619f04) & 0xffffff00) << 8;
93 if (!addr) {
94 addr = (u64)nv_rd32(bios, 0x001700) << 16;
95 addr += 0xf0000;
96 }
97
98 bar0 = nv_mask(bios, 0x001700, 0xffffffff, addr >> 16);
99 }
100
101 /* bail if no rom signature */
102 if (nv_rd08(bios, 0x700000) != 0x55 ||
103 nv_rd08(bios, 0x700001) != 0xaa)
104 goto out;
105
106 bios->size = nv_rd08(bios, 0x700002) * 512;
107 bios->data = kmalloc(bios->size, GFP_KERNEL);
108 if (bios->data) {
109 for (i = 0; i < bios->size; i++)
110 nv_wo08(bios, i, nv_rd08(bios, 0x700000 + i));
111 }
112
113out:
114 if (device->card_type >= NV_50)
115 nv_wr32(bios, 0x001700, bar0);
116}
117
118static void
119nouveau_bios_shadow_prom(struct nouveau_bios *bios)
120{
121 struct nouveau_device *device = nv_device(bios);
122 u32 pcireg, access;
123 u16 pcir;
124 int i;
125
126 /* enable access to rom */
127 if (device->card_type >= NV_50)
128 pcireg = 0x088050;
129 else
130 pcireg = 0x001850;
131 access = nv_mask(bios, pcireg, 0x00000001, 0x00000000);
132
133 /* bail if no rom signature, with a workaround for a PROM reading
134 * issue on some chipsets. the first read after a period of
135 * inactivity returns the wrong result, so retry the first header
136 * byte a few times before giving up as a workaround
137 */
138 i = 16;
139 do {
140 if (nv_rd08(bios, 0x300000) == 0x55)
141 break;
142 } while (i--);
143
144 if (!i || nv_rd08(bios, 0x300001) != 0xaa)
145 goto out;
146
147 /* additional check (see note below) - read PCI record header */
148 pcir = nv_rd08(bios, 0x300018) |
149 nv_rd08(bios, 0x300019) << 8;
150 if (nv_rd08(bios, 0x300000 + pcir) != 'P' ||
151 nv_rd08(bios, 0x300001 + pcir) != 'C' ||
152 nv_rd08(bios, 0x300002 + pcir) != 'I' ||
153 nv_rd08(bios, 0x300003 + pcir) != 'R')
154 goto out;
155
156 /* read entire bios image to system memory */
157 bios->size = nv_rd08(bios, 0x300002) * 512;
158 bios->data = kmalloc(bios->size, GFP_KERNEL);
159 if (bios->data) {
160 for (i = 0; i < bios->size; i++)
161 nv_wo08(bios, i, nv_rd08(bios, 0x300000 + i));
162 }
163
164out:
165 /* disable access to rom */
166 nv_wr32(bios, pcireg, access);
167}
168
169#if defined(CONFIG_ACPI)
170int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
171bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
172#else
173static inline bool
174nouveau_acpi_rom_supported(struct pci_dev *pdev) {
175 return false;
176}
177
178static inline int
179nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) {
180 return -EINVAL;
181}
182#endif
183
184static void
185nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
186{
187 struct pci_dev *pdev = nv_device(bios)->pdev;
188 int cnt = 65536 / 4096;
189 int ret;
190
191 if (!nouveau_acpi_rom_supported(pdev))
192 return;
193
194 bios->data = kmalloc(65536, GFP_KERNEL);
195 bios->size = 0;
196 if (!bios->data)
197 return;
198
199 while (cnt--) {
200 ret = nouveau_acpi_get_bios_chunk(bios->data, bios->size, 4096);
201 if (ret != 4096)
202 return;
203
204 bios->size += 4096;
205 }
206}
207
208static void
209nouveau_bios_shadow_pci(struct nouveau_bios *bios)
210{
211 struct pci_dev *pdev = nv_device(bios)->pdev;
212 size_t size;
213
214 if (!pci_enable_rom(pdev)) {
215 void __iomem *rom = pci_map_rom(pdev, &size);
216 if (rom && size) {
217 bios->data = kmalloc(size, GFP_KERNEL);
218 if (bios->data) {
219 memcpy_fromio(bios->data, rom, size);
220 bios->size = size;
221 }
222 }
223 if (rom)
224 pci_unmap_rom(pdev, rom);
225
226 pci_disable_rom(pdev);
227 }
228}
229
230static int
231nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
232{
233 if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
234 nv_info(bios, "... signature not found\n");
235 return 0;
236 }
237
238 if (nvbios_checksum(bios->data, bios->data[2] * 512)) {
239 nv_info(bios, "... checksum invalid\n");
240 /* if a ro image is somewhat bad, it's probably all rubbish */
241 return writeable ? 2 : 1;
242 }
243
244 nv_info(bios, "... appears to be valid\n");
245 return 3;
246}
247
struct methods {
	const char desc[16];			/* source name, also the NvBios= match key */
	void (*shadow)(struct nouveau_bios *);	/* fills bios->data/bios->size */
	const bool rw;				/* image source is writeable (affects scoring) */
	int score;				/* nouveau_bios_score() result for this source */
	u32 size;				/* saved copy of bios->size for this source */
	u8 *data;				/* saved copy of bios->data for this source */
};
256
257static int
258nouveau_bios_shadow(struct nouveau_bios *bios)
259{
260 struct methods shadow_methods[] = {
261#if defined(__powerpc__)
262 { "OpenFirmware", nouveau_bios_shadow_of, true, 0, 0, NULL },
263#endif
264 { "PRAMIN", nouveau_bios_shadow_pramin, true, 0, 0, NULL },
265 { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
266 { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
267 { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
268 {}
269 };
270 struct methods *mthd, *best;
271 const struct firmware *fw;
272 const char *optarg;
273 int optlen, ret;
274 char *source;
275
276 optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
277 source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
278 if (source) {
279 /* try to match one of the built-in methods */
280 mthd = shadow_methods;
281 do {
282 if (strcasecmp(source, mthd->desc))
283 continue;
284 nv_info(bios, "source: %s\n", mthd->desc);
285
286 mthd->shadow(bios);
287 mthd->score = nouveau_bios_score(bios, mthd->rw);
288 if (mthd->score) {
289 kfree(source);
290 return 0;
291 }
292 } while ((++mthd)->shadow);
293
294 /* attempt to load firmware image */
295 ret = request_firmware(&fw, source, &nv_device(bios)->pdev->dev);
296 if (ret == 0) {
297 bios->size = fw->size;
298 bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
299 release_firmware(fw);
300
301 nv_info(bios, "image: %s\n", source);
302 if (nouveau_bios_score(bios, 1)) {
303 kfree(source);
304 return 0;
305 }
306
307 kfree(bios->data);
308 bios->data = NULL;
309 }
310
311 nv_error(bios, "source \'%s\' invalid\n", source);
312 kfree(source);
313 }
314
315 mthd = shadow_methods;
316 do {
317 nv_info(bios, "checking %s for image...\n", mthd->desc);
318 mthd->shadow(bios);
319 mthd->score = nouveau_bios_score(bios, mthd->rw);
320 mthd->size = bios->size;
321 mthd->data = bios->data;
322 bios->data = NULL;
323 } while (mthd->score != 3 && (++mthd)->shadow);
324
325 mthd = shadow_methods;
326 best = mthd;
327 do {
328 if (mthd->score > best->score) {
329 kfree(best->data);
330 best = mthd;
331 }
332 } while ((++mthd)->shadow);
333
334 if (best->score) {
335 nv_info(bios, "using image from %s\n", best->desc);
336 bios->size = best->size;
337 bios->data = best->data;
338 return 0;
339 }
340
341 nv_error(bios, "unable to locate usable image\n");
342 return -EINVAL;
343}
344
345static u8
346nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
347{
348 struct nouveau_bios *bios = (void *)object;
349 return bios->data[addr];
350}
351
352static u16
353nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
354{
355 struct nouveau_bios *bios = (void *)object;
356 return get_unaligned_le16(&bios->data[addr]);
357}
358
359static u32
360nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
361{
362 struct nouveau_bios *bios = (void *)object;
363 return get_unaligned_le32(&bios->data[addr]);
364}
365
366static void
367nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
368{
369 struct nouveau_bios *bios = (void *)object;
370 bios->data[addr] = data;
371}
372
373static void
374nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
375{
376 struct nouveau_bios *bios = (void *)object;
377 put_unaligned_le16(data, &bios->data[addr]);
378}
379
380static void
381nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
382{
383 struct nouveau_bios *bios = (void *)object;
384 put_unaligned_le32(data, &bios->data[addr]);
385}
386
/* Subdev constructor: shadows the VBIOS image into system memory and
 * detects its layout (BMP and/or BIT tables) and version number.
 */
static int
nouveau_bios_ctor(struct nouveau_object *parent,
		  struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nouveau_bios *bios;
	struct bit_entry bit_i;
	int ret;

	ret = nouveau_subdev_create(parent, engine, oclass, 0,
				    "VBIOS", "bios", &bios);
	*pobject = nv_object(bios);
	if (ret)
		return ret;

	/* fills bios->data/bios->size from the best-scoring source */
	ret = nouveau_bios_shadow(bios);
	if (ret)
		return ret;

	/* detect type of vbios we're dealing with */
	bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
					  "\xff\x7f""NV\0", 5);
	if (bios->bmp_offset) {
		nv_info(bios, "BMP version %x.%x\n",
			bmp_version(bios) >> 8,
			bmp_version(bios) & 0xff);
	}

	bios->bit_offset = nvbios_findstr(bios->data, bios->size,
					  "\xff\xb8""BIT", 5);
	if (bios->bit_offset)
		nv_info(bios, "BIT signature found\n");

	/* determine the vbios version number: prefer the BIT 'i' table,
	 * fall back to the fixed offsets in the BMP header
	 */
	if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) {
		bios->version.major = nv_ro08(bios, bit_i.offset + 3);
		bios->version.chip = nv_ro08(bios, bit_i.offset + 2);
		bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
		bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
	} else
	if (bmp_version(bios)) {
		bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
		bios->version.chip = nv_ro08(bios, bios->bmp_offset + 12);
		bios->version.minor = nv_ro08(bios, bios->bmp_offset + 11);
		bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
	}

	nv_info(bios, "version %02x.%02x.%02x.%02x\n",
		bios->version.major, bios->version.chip,
		bios->version.minor, bios->version.micro);

	return 0;
}
441
static void
nouveau_bios_dtor(struct nouveau_object *object)
{
	struct nouveau_bios *bios = (void *)object;
	/* release the shadow copy allocated by nouveau_bios_shadow() */
	kfree(bios->data);
	nouveau_subdev_destroy(&bios->base);
}
449
450static int
451nouveau_bios_init(struct nouveau_object *object)
452{
453 struct nouveau_bios *bios = (void *)object;
454 return nouveau_subdev_init(&bios->base);
455}
456
457static int
458nouveau_bios_fini(struct nouveau_object *object, bool suspend)
459{
460 struct nouveau_bios *bios = (void *)object;
461 return nouveau_subdev_fini(&bios->base, suspend);
462}
463
struct nouveau_oclass
nouveau_bios_oclass = {
	.handle = NV_SUBDEV(VBIOS, 0x00),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nouveau_bios_ctor,
		.dtor = nouveau_bios_dtor,
		.init = nouveau_bios_init,
		.fini = nouveau_bios_fini,
		/* rd/wr accessors operate on the shadowed image in
		 * system memory, not on the hardware ROM
		 */
		.rd08 = nouveau_bios_rd08,
		.rd16 = nouveau_bios_rd16,
		.rd32 = nouveau_bios_rd32,
		.wr08 = nouveau_bios_wr08,
		.wr16 = nouveau_bios_wr16,
		.wr32 = nouveau_bios_wr32,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c b/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c
new file mode 100644
index 000000000000..1d03a3f2b2d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c
@@ -0,0 +1,52 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "core/object.h"
26
27#include "subdev/bios.h"
28#include "subdev/bios/bit.h"
29
30int
31bit_entry(struct nouveau_bios *bios, u8 id, struct bit_entry *bit)
32{
33 if (likely(bios->bit_offset)) {
34 u8 entries = nv_ro08(bios, bios->bit_offset + 10);
35 u32 entry = bios->bit_offset + 12;
36 while (entries--) {
37 if (nv_ro08(bios, entry + 0) == id) {
38 bit->id = nv_ro08(bios, entry + 0);
39 bit->version = nv_ro08(bios, entry + 1);
40 bit->length = nv_ro16(bios, entry + 2);
41 bit->offset = nv_ro16(bios, entry + 4);
42 return 0;
43 }
44
45 entry += nv_ro08(bios, bios->bit_offset + 9);
46 }
47
48 return -ENOENT;
49 }
50
51 return -EINVAL;
52}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
new file mode 100644
index 000000000000..5ac010efd959
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/device.h>
26
27#include <subdev/bios.h>
28#include <subdev/bios/dcb.h>
29#include <subdev/bios/conn.h>
30
31u16
32dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33{
34 u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
35 if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
36 u16 data = nv_ro16(bios, dcb + 0x14);
37 if (data) {
38 *ver = nv_ro08(bios, data + 0);
39 *hdr = nv_ro08(bios, data + 1);
40 *cnt = nv_ro08(bios, data + 2);
41 *len = nv_ro08(bios, data + 3);
42 return data;
43 }
44 }
45 return 0x0000;
46}
47
48u16
49dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
50{
51 u8 hdr, cnt;
52 u16 data = dcb_conntab(bios, ver, &hdr, &cnt, len);
53 if (data && idx < cnt)
54 return data + hdr + (idx * *len);
55 return 0x0000;
56}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
new file mode 100644
index 000000000000..9ed6e728a94c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "core/device.h"
26
27#include "subdev/bios.h"
28#include "subdev/bios/dcb.h"
29
30u16
31dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
32{
33 struct nouveau_device *device = nv_device(bios);
34 u16 dcb = 0x0000;
35
36 if (device->card_type > NV_04)
37 dcb = nv_ro16(bios, 0x36);
38 if (!dcb) {
39 nv_warn(bios, "DCB table not found\n");
40 return dcb;
41 }
42
43 *ver = nv_ro08(bios, dcb);
44
45 if (*ver >= 0x41) {
46 nv_warn(bios, "DCB *ver 0x%02x unknown\n", *ver);
47 return 0x0000;
48 } else
49 if (*ver >= 0x30) {
50 if (nv_ro32(bios, dcb + 6) == 0x4edcbdcb) {
51 *hdr = nv_ro08(bios, dcb + 1);
52 *cnt = nv_ro08(bios, dcb + 2);
53 *len = nv_ro08(bios, dcb + 3);
54 return dcb;
55 }
56 } else
57 if (*ver >= 0x20) {
58 if (nv_ro32(bios, dcb + 4) == 0x4edcbdcb) {
59 u16 i2c = nv_ro16(bios, dcb + 2);
60 *hdr = 8;
61 *cnt = (i2c - dcb) / 8;
62 *len = 8;
63 return dcb;
64 }
65 } else
66 if (*ver >= 0x15) {
67 if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) {
68 u16 i2c = nv_ro16(bios, dcb + 2);
69 *hdr = 4;
70 *cnt = (i2c - dcb) / 10;
71 *len = 10;
72 return dcb;
73 }
74 } else {
75 /*
76 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
77 * always has the same single (crt) entry, even when tv-out
78 * present, so the conclusion is this version cannot really
79 * be used.
80 *
81 * v1.2 tables (some NV6/10, and NV15+) normally have the
82 * same 5 entries, which are not specific to the card and so
83 * no use.
84 *
85 * v1.2 does have an I2C table that read_dcb_i2c_table can
86 * handle, but cards exist (nv11 in #14821) with a bad i2c
87 * table pointer, so use the indices parsed in
88 * parse_bmp_structure.
89 *
90 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
91 */
92 nv_warn(bios, "DCB contains no useful data\n");
93 return 0x0000;
94 }
95
96 nv_warn(bios, "DCB header validation failed\n");
97 return 0x0000;
98}
99
100u16
101dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
102{
103 u8 hdr, cnt;
104 u16 dcb = dcb_table(bios, ver, &hdr, &cnt, len);
105 if (dcb && idx < cnt)
106 return dcb + hdr + (idx * *len);
107 return 0x0000;
108}
109
110int
111dcb_outp_foreach(struct nouveau_bios *bios, void *data,
112 int (*exec)(struct nouveau_bios *, void *, int, u16))
113{
114 int ret, idx = -1;
115 u8 ver, len;
116 u16 outp;
117
118 while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
119 if (nv_ro32(bios, outp) == 0x00000000)
120 break; /* seen on an NV11 with DCB v1.5 */
121 if (nv_ro32(bios, outp) == 0xffffffff)
122 break; /* seen on an NV17 with DCB v2.0 */
123
124 if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED)
125 continue;
126 if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL)
127 break;
128
129 ret = exec(bios, data, idx, outp);
130 if (ret)
131 return ret;
132 }
133
134 return 0;
135}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
new file mode 100644
index 000000000000..3cbc0f3e8d5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25
26#include "subdev/bios.h"
27#include "subdev/bios/bit.h"
28#include "subdev/bios/dcb.h"
29#include "subdev/bios/dp.h"
30
31u16
32dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33{
34 struct bit_entry bit_d;
35
36 if (!bit_entry(bios, 'd', &bit_d)) {
37 if (bit_d.version == 1) {
38 u16 data = nv_ro16(bios, bit_d.offset);
39 if (data) {
40 *ver = nv_ro08(bios, data + 0);
41 *hdr = nv_ro08(bios, data + 1);
42 *len = nv_ro08(bios, data + 2);
43 *cnt = nv_ro08(bios, data + 3);
44 return data;
45 }
46 }
47 }
48
49 return 0x0000;
50}
51
52u16
53dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
54{
55 u8 hdr, cnt;
56 u16 table = dp_table(bios, ver, &hdr, &cnt, len);
57 if (table && idx < cnt)
58 return nv_ro16(bios, table + hdr + (idx * *len));
59 return 0xffff;
60}
61
62u16
63dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp,
64 u8 *ver, u8 *len)
65{
66 u8 idx = 0;
67 u16 data;
68 while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) {
69 if (data) {
70 u32 hash = nv_ro32(bios, data);
71 if (dcb_hash_match(outp, hash))
72 return data;
73 }
74 }
75 return 0x0000;
76}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
new file mode 100644
index 000000000000..5afb568b2d69
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/dcb.h>
27#include <subdev/bios/extdev.h>
28
29static u16
30extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
31{
32 u8 dcb_ver, dcb_hdr, dcb_cnt, dcb_len;
33 u16 dcb, extdev = 0;
34
35 dcb = dcb_table(bios, &dcb_ver, &dcb_hdr, &dcb_cnt, &dcb_len);
36 if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
37 return 0x0000;
38
39 extdev = nv_ro16(bios, dcb + 18);
40 if (!extdev)
41 return 0x0000;
42
43 *ver = nv_ro08(bios, extdev + 0);
44 *hdr = nv_ro08(bios, extdev + 1);
45 *cnt = nv_ro08(bios, extdev + 2);
46 *len = nv_ro08(bios, extdev + 3);
47
48 return extdev + *hdr;
49}
50
51u16
52nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
53{
54 u8 hdr, cnt;
55 u16 extdev = extdev_table(bios, ver, &hdr, len, &cnt);
56 if (extdev && idx < cnt)
57 return extdev + idx * *len;
58 return 0x0000;
59}
60
61static void
62extdev_parse_entry(struct nouveau_bios *bios, u16 offset,
63 struct nvbios_extdev_func *entry)
64{
65 entry->type = nv_ro08(bios, offset + 0);
66 entry->addr = nv_ro08(bios, offset + 1);
67 entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1;
68}
69
70int
71nvbios_extdev_parse(struct nouveau_bios *bios, int idx,
72 struct nvbios_extdev_func *func)
73{
74 u8 ver, len;
75 u16 entry;
76
77 if (!(entry = nvbios_extdev_entry(bios, idx, &ver, &len)))
78 return -EINVAL;
79
80 extdev_parse_entry(bios, entry, func);
81
82 return 0;
83}
84
85int
86nvbios_extdev_find(struct nouveau_bios *bios, enum nvbios_extdev_type type,
87 struct nvbios_extdev_func *func)
88{
89 u8 ver, len, i;
90 u16 entry;
91
92 i = 0;
93 while (!(entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
94 extdev_parse_entry(bios, entry, func);
95 if (func->type == type)
96 return 0;
97 }
98
99 return -EINVAL;
100}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
new file mode 100644
index 000000000000..4c9f1e508165
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -0,0 +1,121 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/dcb.h>
27#include <subdev/bios/gpio.h>
28
29u16
30dcb_gpio_table(struct nouveau_bios *bios)
31{
32 u8 ver, hdr, cnt, len;
33 u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
34 if (dcb) {
35 if (ver >= 0x30 && hdr >= 0x0c)
36 return nv_ro16(bios, dcb + 0x0a);
37 if (ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
38 return nv_ro16(bios, dcb - 0x0f);
39 }
40 return 0x0000;
41}
42
43u16
44dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver)
45{
46 u16 gpio = dcb_gpio_table(bios);
47 if (gpio) {
48 *ver = nv_ro08(bios, gpio);
49 if (*ver < 0x30 && ent < nv_ro08(bios, gpio + 2))
50 return gpio + 3 + (ent * nv_ro08(bios, gpio + 1));
51 else if (ent < nv_ro08(bios, gpio + 2))
52 return gpio + nv_ro08(bios, gpio + 1) +
53 (ent * nv_ro08(bios, gpio + 3));
54 }
55 return 0x0000;
56}
57
/* Find a GPIO function matching (func, line) in the DCB GPIO table;
 * 0xff acts as a wildcard for either field.  Fills *gpio and returns
 * 0 on success, -EINVAL when no match was found.
 */
int
dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
	       struct dcb_gpio_func *gpio)
{
	u8 ver, hdr, cnt, len;
	u16 entry;
	int i = -1;

	while ((entry = dcb_gpio_entry(bios, idx, ++i, &ver))) {
		if (ver < 0x40) {
			/* table version < 0x40: packed 16-bit entries */
			u16 data = nv_ro16(bios, entry);
			*gpio = (struct dcb_gpio_func) {
				.line = (data & 0x001f) >> 0,
				.func = (data & 0x07e0) >> 5,
				.log[0] = (data & 0x1800) >> 11,
				.log[1] = (data & 0x6000) >> 13,
				.param = !!(data & 0x8000),
			};
		} else
		if (ver < 0x41) {
			/* table version 0x40: 32-bit entries */
			u32 data = nv_ro32(bios, entry);
			*gpio = (struct dcb_gpio_func) {
				.line = (data & 0x0000001f) >> 0,
				.func = (data & 0x0000ff00) >> 8,
				.log[0] = (data & 0x18000000) >> 27,
				.log[1] = (data & 0x60000000) >> 29,
				.param = !!(data & 0x80000000),
			};
		} else {
			/* table version 0x41+: two dwords per entry; only
			 * the low byte of the second dword is used here
			 * (the nv_ro32 result is deliberately truncated)
			 */
			u32 data = nv_ro32(bios, entry + 0);
			u8 data1 = nv_ro32(bios, entry + 4);
			*gpio = (struct dcb_gpio_func) {
				.line = (data & 0x0000003f) >> 0,
				.func = (data & 0x0000ff00) >> 8,
				.log[0] = (data1 & 0x30) >> 4,
				.log[1] = (data1 & 0xc0) >> 6,
				.param = !!(data & 0x80000000),
			};
		}

		if ((line == 0xff || line == gpio->line) &&
		    (func == 0xff || func == gpio->func))
			return 0;
	}

	/* DCB 2.2, fixed TVDAC GPIO data */
	if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) {
		if (func == DCB_GPIO_TVDAC0) {
			/* conf/addr bytes live just before the DCB header */
			u8 conf = nv_ro08(bios, entry - 5);
			u8 addr = nv_ro08(bios, entry - 4);
			if (conf & 0x01) {
				*gpio = (struct dcb_gpio_func) {
					.func = DCB_GPIO_TVDAC0,
					.line = addr >> 4,
					.log[0] = !!(conf & 0x02),
					.log[1] = !(conf & 0x02),
				};
				return 0;
			}
		}
	}

	return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
new file mode 100644
index 000000000000..ad577db83766
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25
26#include "subdev/bios.h"
27#include "subdev/bios/dcb.h"
28#include "subdev/bios/i2c.h"
29
30u16
31dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
32{
33 u16 i2c = 0x0000;
34 u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
35 if (dcb) {
36 if (*ver >= 0x15)
37 i2c = nv_ro16(bios, dcb + 2);
38 if (*ver >= 0x30)
39 i2c = nv_ro16(bios, dcb + 4);
40 }
41
42 if (i2c && *ver >= 0x30) {
43 *ver = nv_ro08(bios, i2c + 0);
44 *hdr = nv_ro08(bios, i2c + 1);
45 *cnt = nv_ro08(bios, i2c + 2);
46 *len = nv_ro08(bios, i2c + 3);
47 } else {
48 *ver = *ver; /* use DCB version */
49 *hdr = 0;
50 *cnt = 16;
51 *len = 4;
52 }
53
54 return i2c;
55}
56
57u16
58dcb_i2c_entry(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
59{
60 u8 hdr, cnt;
61 u16 i2c = dcb_i2c_table(bios, ver, &hdr, &cnt, len);
62 if (i2c && idx < cnt)
63 return i2c + hdr + (idx * *len);
64 return 0x0000;
65}
66
/* Decode i2c port idx into *info.  Prefers the DCB i2c table; for the
 * first two ports it falls back to the fixed/BMP locations used by
 * very old VBIOSes.  Returns 0 on success, -ENOENT otherwise.
 */
int
dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
{
	u8 ver, len;
	u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
	if (ent) {
		info->data = nv_ro32(bios, ent + 0);
		info->type = nv_ro08(bios, ent + 3);
		/* pre-3.0 entries only use the low 3 bits of the type
		 * byte; 0x07 there means "unused"
		 */
		if (ver < 0x30) {
			info->type &= 0x07;
			if (info->type == 0x07)
				info->type = 0xff;
		}

		switch (info->type) {
		case DCB_I2C_NV04_BIT:
			info->drive = nv_ro08(bios, ent + 0);
			info->sense = nv_ro08(bios, ent + 1);
			return 0;
		case DCB_I2C_NV4E_BIT:
			info->drive = nv_ro08(bios, ent + 1);
			return 0;
		case DCB_I2C_NVIO_BIT:
		case DCB_I2C_NVIO_AUX:
			info->drive = nv_ro08(bios, ent + 0);
			return 0;
		case DCB_I2C_UNUSED:
			return 0;
		default:
			/* unrecognised: mark unused rather than failing */
			nv_warn(bios, "unknown i2c type %d\n", info->type);
			info->type = DCB_I2C_UNUSED;
			return 0;
		}
	}

	if (bios->bmp_offset && idx < 2) {
		/* BMP (from v4.0 has i2c info in the structure, it's in a
		 * fixed location on earlier VBIOS
		 */
		if (nv_ro08(bios, bios->bmp_offset + 5) < 4)
			ent = 0x0048;
		else
			ent = 0x0036 + bios->bmp_offset;

		/* zero drive/sense bytes fall back to fixed defaults */
		if (idx == 0) {
			info->drive = nv_ro08(bios, ent + 4);
			if (!info->drive) info->drive = 0x3f;
			info->sense = nv_ro08(bios, ent + 5);
			if (!info->sense) info->sense = 0x3e;
		} else
		if (idx == 1) {
			info->drive = nv_ro08(bios, ent + 6);
			if (!info->drive) info->drive = 0x37;
			info->sense = nv_ro08(bios, ent + 7);
			if (!info->sense) info->sense = 0x36;
		}

		info->type = DCB_I2C_NV04_BIT;
		return 0;
	}

	return -ENOENT;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
new file mode 100644
index 000000000000..6be8c32f6e4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -0,0 +1,2120 @@
1#include <core/engine.h>
2#include <core/device.h>
3
4#include <subdev/bios.h>
5#include <subdev/bios/conn.h>
6#include <subdev/bios/bmp.h>
7#include <subdev/bios/bit.h>
8#include <subdev/bios/dcb.h>
9#include <subdev/bios/dp.h>
10#include <subdev/bios/init.h>
11#include <subdev/devinit.h>
12#include <subdev/clock.h>
13#include <subdev/i2c.h>
14#include <subdev/vga.h>
15#include <subdev/gpio.h>
16
17#define bioslog(lvl, fmt, args...) do { \
18 nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset, \
19 init_exec(init) ? '0' + (init->nested - 1) : ' ', ##args); \
20} while(0)
21#define cont(fmt, args...) do { \
22 if (nv_subdev(init->bios)->debug >= NV_DBG_TRACE) \
23 printk(fmt, ##args); \
24} while(0)
25#define trace(fmt, args...) bioslog(TRACE, fmt, ##args)
26#define warn(fmt, args...) bioslog(WARN, fmt, ##args)
27#define error(fmt, args...) bioslog(ERROR, fmt, ##args)
28
29/******************************************************************************
30 * init parser control flow helpers
31 *****************************************************************************/
32
/* init->execute is a small state bitfield:
 *   bit 0 - script is being executed (not just traced)
 *   bit 1 - execution paused by a failed condition opcode
 *   bit 2 - execution temporarily forced on (see INIT_I2C_IF)
 */
static inline bool
init_exec(struct nvbios_init *init)
{
	/* executing and not paused, or forced on while executing */
	return (init->execute == 1) || ((init->execute & 5) == 5);
}

static inline void
init_exec_set(struct nvbios_init *init, bool exec)
{
	/* clear (resume) or set (pause) the pause bit */
	if (exec) init->execute &= 0xfd;
	else init->execute |= 0x02;
}

static inline void
init_exec_inv(struct nvbios_init *init)
{
	/* toggle the pause bit (INIT_NOT) */
	init->execute ^= 0x02;
}

static inline void
init_exec_force(struct nvbios_init *init, bool exec)
{
	/* set/clear the force bit */
	if (exec) init->execute |= 0x04;
	else init->execute &= 0xfb;
}
58
59/******************************************************************************
60 * init parser wrappers for normal register/i2c/whatever accessors
61 *****************************************************************************/
62
/* Output-context accessors: scripts that poke per-OR/per-head registers
 * need an output (init->outp) or crtc (init->crtc) bound to the parser;
 * each helper logs an error and falls back to 0 when it is missing.
 */
static inline int
init_or(struct nvbios_init *init)
{
	/* index of lowest set bit in the output resource mask */
	if (init->outp)
		return ffs(init->outp->or) - 1;
	error("script needs OR!!\n");
	return 0;
}

static inline int
init_link(struct nvbios_init *init)
{
	/* 0 for sublink B, 1 for sublink A (inverted sorconf bit) */
	if (init->outp)
		return !(init->outp->sorconf.link & 1);
	error("script needs OR link\n");
	return 0;
}

static inline int
init_crtc(struct nvbios_init *init)
{
	/* init->crtc < 0 means "no head bound" */
	if (init->crtc >= 0)
		return init->crtc;
	error("script needs crtc\n");
	return 0;
}
89
/* Return the connector type byte (first byte of the DCB connector table
 * entry) for the script's bound output, or 0x00 with an error logged.
 */
static u8
init_conn(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;

	if (init->outp) {
		u8 ver, len;
		u16 conn = dcb_conn(bios, init->outp->connector, &ver, &len);
		if (conn)
			return nv_ro08(bios, conn);
	}

	error("script needs connector type\n");
	return 0x00;
}
105
/* Translate a register address as encoded in a VBIOS script into a real
 * MMIO address, applying per-CRTC/OR/link offsets on NV50+ boards.
 */
static inline u32
init_nvreg(struct nvbios_init *init, u32 reg)
{
	/* C51 (at least) sometimes has the lower bits set which the VBIOS
	 * interprets to mean that access needs to go through certain IO
	 * ports instead. The NVIDIA binary driver has been seen to access
	 * these through the NV register address, so lets assume we can
	 * do the same
	 */
	reg &= ~0x00000003;

	/* GF8+ display scripts need register addresses mangled a bit to
	 * select a specific CRTC/OR
	 */
	if (nv_device(init->bios)->card_type >= NV_50) {
		if (reg & 0x80000000) {
			/* bit 31: add 0x800 per CRTC */
			reg += init_crtc(init) * 0x800;
			reg &= ~0x80000000;
		}

		if (reg & 0x40000000) {
			/* bit 30: add 0x800 per OR; bit 29 (only valid with
			 * bit 30): add 0x80 per link */
			reg += init_or(init) * 0x800;
			reg &= ~0x40000000;
			if (reg & 0x20000000) {
				reg += init_link(init) * 0x80;
				reg &= ~0x20000000;
			}
		}
	}

	if (reg & ~0x00fffffc)
		warn("unknown bits in register 0x%08x\n", reg);
	return reg;
}
140
/* MMIO accessors: translate the scripted address and only touch the
 * hardware when the script is actually executing (reads return 0 and
 * writes are dropped while paused/tracing).
 */
static u32
init_rd32(struct nvbios_init *init, u32 reg)
{
	reg = init_nvreg(init, reg);
	if (init_exec(init))
		return nv_rd32(init->subdev, reg);
	return 0x00000000;
}

static void
init_wr32(struct nvbios_init *init, u32 reg, u32 val)
{
	reg = init_nvreg(init, reg);
	if (init_exec(init))
		nv_wr32(init->subdev, reg, val);
}

/* read-modify-write; returns the pre-modification value (0 if skipped) */
static u32
init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
{
	reg = init_nvreg(init, reg);
	if (init_exec(init)) {
		u32 tmp = nv_rd32(init->subdev, reg);
		nv_wr32(init->subdev, reg, (tmp & ~mask) | val);
		return tmp;
	}
	return 0x00000000;
}
169
/* Legacy VGA IO port / indexed-register accessors, gated on init_exec()
 * like the MMIO wrappers above.
 */
static u8
init_rdport(struct nvbios_init *init, u16 port)
{
	if (init_exec(init))
		return nv_rdport(init->subdev, init->crtc, port);
	return 0x00;
}

static void
init_wrport(struct nvbios_init *init, u16 port, u8 value)
{
	if (init_exec(init))
		nv_wrport(init->subdev, init->crtc, port, value);
}

static u8
init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
{
	struct nouveau_subdev *subdev = init->subdev;
	if (init_exec(init)) {
		/* unbound crtc (-1) is treated as head 0 */
		int head = init->crtc < 0 ? 0 : init->crtc;
		return nv_rdvgai(subdev, head, port, index);
	}
	return 0x00;
}

static void
init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
{
	/* force head 0 for updates to cr44, it only exists on first head */
	if (nv_device(init->subdev)->card_type < NV_50) {
		if (port == 0x03d4 && index == 0x44)
			init->crtc = 0;
	}

	if (init_exec(init)) {
		int head = init->crtc < 0 ? 0 : init->crtc;
		nv_wrvgai(init->subdev, head, port, index, value);
	}

	/* select head 1 if cr44 write selected it */
	if (nv_device(init->subdev)->card_type < NV_50) {
		if (port == 0x03d4 && index == 0x44 && value == 3)
			init->crtc = 1;
	}
}
216
/* Resolve a scripted i2c bus index to a port object.  index 0xff selects
 * the board's default DDC bus (upper one if the output asks for it); a
 * negative index means "use the bound output's bus".
 */
static struct nouveau_i2c_port *
init_i2c(struct nvbios_init *init, int index)
{
	struct nouveau_i2c *i2c = nouveau_i2c(init->bios);

	if (index == 0xff) {
		index = NV_I2C_DEFAULT(0);
		if (init->outp && init->outp->i2c_upper_default)
			index = NV_I2C_DEFAULT(1);
	} else
	if (index < 0) {
		if (!init->outp) {
			error("script needs output for i2c\n");
			return NULL;
		}

		index = init->outp->i2c_index;
	}

	return i2c->find(i2c, index);
}
238
/* i2c / DP aux register accessors; all return -ENODEV when the bus can't
 * be resolved or the script isn't executing.
 */
static int
init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
{
	struct nouveau_i2c_port *port = init_i2c(init, index);
	if (port && init_exec(init))
		return nv_rdi2cr(port, addr, reg);
	return -ENODEV;
}

static int
init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
{
	struct nouveau_i2c_port *port = init_i2c(init, index);
	if (port && init_exec(init))
		return nv_wri2cr(port, addr, reg, val);
	return -ENODEV;
}

static int
init_rdauxr(struct nvbios_init *init, u32 addr)
{
	/* aux access always goes through the bound output's bus (-1) */
	struct nouveau_i2c_port *port = init_i2c(init, -1);
	u8 data;

	if (port && init_exec(init)) {
		int ret = nv_rdaux(port, addr, &data, 1);
		if (ret)
			return ret;
		return data;
	}

	return -ENODEV;
}

static int
init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
{
	struct nouveau_i2c_port *port = init_i2c(init, -1);
	if (port && init_exec(init))
		return nv_wraux(port, addr, &data, 1);
	return -ENODEV;
}
281
282static void
283init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
284{
285 struct nouveau_clock *clk = nouveau_clock(init->bios);
286 if (clk && clk->pll_set && init_exec(init)) {
287 int ret = clk->pll_set(clk, id, freq);
288 if (ret)
289 warn("failed to prog pll 0x%08x to %dkHz\n", id, freq);
290 }
291}
292
293/******************************************************************************
294 * parsing of bios structures that are required to execute init tables
295 *****************************************************************************/
296
/* Locate the init-table pointer block: the BIT 'I' entry on BIT BIOSes,
 * or a fixed offset inside BMP structures from version 5.10.  Stores the
 * block length in *len and returns its offset (0x0000 if absent).
 */
static u16
init_table(struct nouveau_bios *bios, u16 *len)
{
	struct bit_entry bit_I;

	if (!bit_entry(bios, 'I', &bit_I)) {
		*len = bit_I.length;
		return bit_I.offset;
	}

	if (bmp_version(bios) >= 0x0510) {
		*len = 14;
		return bios->bmp_offset + 75;
	}

	return 0x0000;
}
314
315static u16
316init_table_(struct nvbios_init *init, u16 offset, const char *name)
317{
318 struct nouveau_bios *bios = init->bios;
319 u16 len, data = init_table(bios, &len);
320 if (data) {
321 if (len >= offset + 2) {
322 data = nv_ro16(bios, data + offset);
323 if (data)
324 return data;
325
326 warn("%s pointer invalid\n", name);
327 return 0x0000;
328 }
329
330 warn("init data too short for %s pointer", name);
331 return 0x0000;
332 }
333
334 warn("init data not found\n");
335 return 0x0000;
336}
337
338#define init_script_table(b) init_table_((b), 0x00, "script table")
339#define init_macro_index_table(b) init_table_((b), 0x02, "macro index table")
340#define init_macro_table(b) init_table_((b), 0x04, "macro table")
341#define init_condition_table(b) init_table_((b), 0x06, "condition table")
342#define init_io_condition_table(b) init_table_((b), 0x08, "io condition table")
343#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag conditon table")
344#define init_function_table(b) init_table_((b), 0x0c, "function table")
/* fix: drop the stray trailing semicolon, which made the macro expand to
 * two statements and broke use in expression context */
#define init_xlat_table(b) init_table_((b), 0x10, "xlat table")
346
/* Return the offset of init script 'index'.  Pre-5.10 BMP BIOSes store
 * (at most two) script pointers at a fixed offset inside the BMP header;
 * everything else goes through the script table.
 */
static u16
init_script(struct nouveau_bios *bios, int index)
{
	struct nvbios_init init = { .bios = bios };
	u16 data;

	if (bmp_version(bios) && bmp_version(bios) < 0x0510) {
		if (index > 1)
			return 0x0000;

		/* pointer location moved between BMP major versions */
		data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18);
		return nv_ro16(bios, data + (index * 2));
	}

	data = init_script_table(&init);
	if (data)
		return nv_ro16(bios, data + (index * 2));

	return 0x0000;
}
367
368static u16
369init_unknown_script(struct nouveau_bios *bios)
370{
371 u16 len, data = init_table(bios, &len);
372 if (data && len >= 16)
373 return nv_ro16(bios, data + 14);
374 return 0x0000;
375}
376
/* Locate the RAM restrict table via the BIT 'M' entry; the pointer's
 * position inside the entry depends on the entry version.
 */
static u16
init_ram_restrict_table(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	struct bit_entry bit_M;
	u16 data = 0x0000;

	if (!bit_entry(bios, 'M', &bit_M)) {
		if (bit_M.version == 1 && bit_M.length >= 5)
			data = nv_ro16(bios, bit_M.offset + 3);
		if (bit_M.version == 2 && bit_M.length >= 3)
			data = nv_ro16(bios, bit_M.offset + 1);
	}

	if (data == 0x0000)
		warn("ram restrict table not found\n");
	return data;
}
395
396static u8
397init_ram_restrict_group_count(struct nvbios_init *init)
398{
399 struct nouveau_bios *bios = init->bios;
400 struct bit_entry bit_M;
401
402 if (!bit_entry(bios, 'M', &bit_M)) {
403 if (bit_M.version == 1 && bit_M.length >= 5)
404 return nv_ro08(bios, bit_M.offset + 2);
405 if (bit_M.version == 2 && bit_M.length >= 3)
406 return nv_ro08(bios, bit_M.offset + 0);
407 }
408
409 return 0x00;
410}
411
412static u8
413init_ram_restrict(struct nvbios_init *init)
414{
415 u32 strap = (init_rd32(init, 0x101000) & 0x0000003c) >> 2;
416 u16 table = init_ram_restrict_table(init);
417 if (table)
418 return nv_ro08(init->bios, table + strap);
419 return 0x00;
420}
421
/* Look up 'offset' inside xlat sub-table 'index' (a table of 16-bit
 * pointers to translation arrays); 0 on any failure.
 */
static u8
init_xlat_(struct nvbios_init *init, u8 index, u8 offset)
{
	struct nouveau_bios *bios = init->bios;
	u16 table = init_xlat_table(init);
	if (table) {
		u16 data = nv_ro16(bios, table + (index * 2));
		if (data)
			return nv_ro08(bios, data + offset);
		warn("xlat table pointer %d invalid\n", index);
	}
	return 0x00;
}
435
436/******************************************************************************
437 * utility functions used by various init opcode handlers
438 *****************************************************************************/
439
/* Evaluate condition-table entry 'cond': 12-byte entries of
 * (register, mask, expected value); true when (R[reg] & mask) == value.
 */
static bool
init_condition_met(struct nvbios_init *init, u8 cond)
{
	struct nouveau_bios *bios = init->bios;
	u16 table = init_condition_table(init);
	if (table) {
		u32 reg = nv_ro32(bios, table + (cond * 12) + 0);
		u32 msk = nv_ro32(bios, table + (cond * 12) + 4);
		u32 val = nv_ro32(bios, table + (cond * 12) + 8);
		trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
		      cond, reg, msk, val);
		return (init_rd32(init, reg) & msk) == val;
	}
	return false;
}
455
/* Evaluate io-condition-table entry 'cond': 5-byte entries of
 * (port, index, mask, expected value) against an indexed VGA register.
 */
static bool
init_io_condition_met(struct nvbios_init *init, u8 cond)
{
	struct nouveau_bios *bios = init->bios;
	u16 table = init_io_condition_table(init);
	if (table) {
		u16 port = nv_ro16(bios, table + (cond * 5) + 0);
		u8 index = nv_ro08(bios, table + (cond * 5) + 2);
		u8 mask = nv_ro08(bios, table + (cond * 5) + 3);
		u8 value = nv_ro08(bios, table + (cond * 5) + 4);
		trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n",
		      cond, port, index, mask, value);
		return (init_rdvgai(init, port, index) & mask) == value;
	}
	return false;
}
472
/* Evaluate io-flag-condition-table entry 'cond' (9-byte entries): the
 * masked+shifted VGA register value indexes a lookup table at 'data',
 * and the fetched byte is compared against 'value' under 'dmask'.
 */
static bool
init_io_flag_condition_met(struct nvbios_init *init, u8 cond)
{
	struct nouveau_bios *bios = init->bios;
	u16 table = init_io_flag_condition_table(init);
	if (table) {
		u16 port = nv_ro16(bios, table + (cond * 9) + 0);
		u8 index = nv_ro08(bios, table + (cond * 9) + 2);
		u8 mask = nv_ro08(bios, table + (cond * 9) + 3);
		u8 shift = nv_ro08(bios, table + (cond * 9) + 4);
		u16 data = nv_ro16(bios, table + (cond * 9) + 5);
		u8 dmask = nv_ro08(bios, table + (cond * 9) + 7);
		u8 value = nv_ro08(bios, table + (cond * 9) + 8);
		u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift;
		return (nv_ro08(bios, data + ioval) & dmask) == value;
	}
	return false;
}
491
492static inline u32
493init_shift(u32 data, u8 shift)
494{
495 if (shift < 0x80)
496 return data >> shift;
497 return data << (0x100 - shift);
498}
499
/* Map a scripted TMDS selector byte onto a PRAMDAC register address;
 * returns 0 (with an error logged) when it can't be resolved.
 */
static u32
init_tmds_reg(struct nvbios_init *init, u8 tmds)
{
	/* For mlv < 0x80, it is an index into a table of TMDS base addresses.
	 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
	 * CR58 for CR57 = 0 to index a table of offsets to the basic
	 * 0x6808b0 address.
	 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
	 * CR58 for CR57 = 0 to index a table of offsets to the basic
	 * 0x6808b0 address, and then flip the offset by 8.
	 */

	const int pramdac_offset[13] = {
		0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
	const u32 pramdac_table[4] = {
		0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };

	if (tmds >= 0x80) {
		if (init->outp) {
			/* offset selected by the output's OR value */
			u32 dacoffset = pramdac_offset[init->outp->or];
			if (tmds == 0x81)
				dacoffset ^= 8;
			return 0x6808b0 + dacoffset;
		}

		error("tmds opcodes need dcb\n");
	} else {
		if (tmds < ARRAY_SIZE(pramdac_table))
			return pramdac_table[tmds];

		error("tmds selector 0x%02x unknown\n", tmds);
	}

	return 0;
}
535
536/******************************************************************************
537 * init opcode handlers
538 *****************************************************************************/
539
540/**
541 * init_reserved - stub for various unknown/unused single-byte opcodes
542 *
543 */
544static void
545init_reserved(struct nvbios_init *init)
546{
547 u8 opcode = nv_ro08(init->bios, init->offset);
548 trace("RESERVED\t0x%02x\n", opcode);
549 init->offset += 1;
550}
551
552/**
553 * INIT_DONE - opcode 0x71
554 *
555 */
556static void
557init_done(struct nvbios_init *init)
558{
559 trace("DONE\n");
560 init->offset = 0x0000;
561}
562
563/**
564 * INIT_IO_RESTRICT_PROG - opcode 0x32
565 *
566 */
/* Read an indexed VGA register, mask+shift it to select one of 'count'
 * 32-bit values from the opcode body, and write the selected value to
 * 'reg'.  All entries are traced; only the selected one is written.
 */
static void
init_io_restrict_prog(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u16 port = nv_ro16(bios, init->offset + 1);
	u8 index = nv_ro08(bios, init->offset + 3);
	u8 mask = nv_ro08(bios, init->offset + 4);
	u8 shift = nv_ro08(bios, init->offset + 5);
	u8 count = nv_ro08(bios, init->offset + 6);
	u32 reg = nv_ro32(bios, init->offset + 7);
	u8 conf, i;

	trace("IO_RESTRICT_PROG\tR[0x%06x] = "
	      "((0x%04x[0x%02x] & 0x%02x) >> %d) [{\n",
	      reg, port, index, mask, shift);
	init->offset += 11;

	conf = (init_rdvgai(init, port, index) & mask) >> shift;
	for (i = 0; i < count; i++) {
		u32 data = nv_ro32(bios, init->offset);

		if (i == conf) {
			trace("\t0x%08x *\n", data);
			init_wr32(init, reg, data);
		} else {
			trace("\t0x%08x\n", data);
		}

		/* offset advances over every entry, selected or not */
		init->offset += 4;
	}
	trace("}]\n");
}
599
600/**
601 * INIT_REPEAT - opcode 0x33
602 *
603 */
/* Re-execute the following opcodes (up to INIT_END_REPEAT) 'count'
 * times by recursively invoking nvbios_exec() from the saved offset.
 * init->repeat / init->repend are saved/restored so REPEAT can nest.
 */
static void
init_repeat(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 count = nv_ro08(bios, init->offset + 1);
	u16 repeat = init->repeat;

	trace("REPEAT\t0x%02x\n", count);
	init->offset += 2;

	init->repeat = init->offset;
	init->repend = init->offset;
	while (count--) {
		init->offset = init->repeat;
		/* END_REPEAT stops this by zeroing init->offset and
		 * recording the resume point in init->repend */
		nvbios_exec(init);
		if (count)
			trace("REPEAT\t0x%02x\n", count);
	}
	init->offset = init->repend;
	init->repeat = repeat;
}
625
626/**
627 * INIT_IO_RESTRICT_PLL - opcode 0x34
628 *
629 */
/* Like IO_RESTRICT_PROG, but each entry is a 16-bit frequency (in units
 * of 10kHz) programmed into a PLL; an optional io-flag condition 'iofc'
 * doubles the selected frequency when met.
 */
static void
init_io_restrict_pll(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u16 port = nv_ro16(bios, init->offset + 1);
	u8 index = nv_ro08(bios, init->offset + 3);
	u8 mask = nv_ro08(bios, init->offset + 4);
	u8 shift = nv_ro08(bios, init->offset + 5);
	s8 iofc = nv_ro08(bios, init->offset + 6);
	u8 count = nv_ro08(bios, init->offset + 7);
	u32 reg = nv_ro32(bios, init->offset + 8);
	u8 conf, i;

	trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= "
	      "((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) IOFCOND 0x%02x [{\n",
	      reg, port, index, mask, shift, iofc);
	init->offset += 12;

	conf = (init_rdvgai(init, port, index) & mask) >> shift;
	for (i = 0; i < count; i++) {
		/* table stores frequency / 10 */
		u32 freq = nv_ro16(bios, init->offset) * 10;

		if (i == conf) {
			trace("\t%dkHz *\n", freq);
			if (iofc > 0 && init_io_flag_condition_met(init, iofc))
				freq *= 2;
			init_prog_pll(init, reg, freq);
		} else {
			trace("\t%dkHz\n", freq);
		}

		init->offset += 2;
	}
	trace("}]\n");
}
665
666/**
667 * INIT_END_REPEAT - opcode 0x36
668 *
669 */
/* Terminate the innermost REPEAT iteration: record where execution
 * should resume (repend) and zero the offset so the recursive
 * nvbios_exec() started by INIT_REPEAT returns.
 */
static void
init_end_repeat(struct nvbios_init *init)
{
	trace("END_REPEAT\n");
	init->offset += 1;

	if (init->repeat) {
		init->repend = init->offset;
		init->offset = 0;
	}
}
681
682/**
683 * INIT_COPY - opcode 0x37
684 *
685 */
/* Copy (a shifted, masked slice of) a 32-bit register into the masked
 * bits of an indexed VGA register.
 */
static void
init_copy(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u32 reg = nv_ro32(bios, init->offset + 1);
	u8 shift = nv_ro08(bios, init->offset + 5);
	u8 smask = nv_ro08(bios, init->offset + 6);
	u16 port = nv_ro16(bios, init->offset + 7);
	u8 index = nv_ro08(bios, init->offset + 9);
	u8 mask = nv_ro08(bios, init->offset + 10);
	u8 data;

	trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= "
	      "((R[0x%06x] %s 0x%02x) & 0x%02x)\n",
	      port, index, mask, reg, (shift & 0x80) ? "<<" : ">>",
	      (shift & 0x80) ? (0x100 - shift) : shift, smask);
	init->offset += 11;

	data = init_rdvgai(init, port, index) & mask;
	data |= init_shift(init_rd32(init, reg), shift) & smask;
	init_wrvgai(init, port, index, data);
}
708
709/**
710 * INIT_NOT - opcode 0x38
711 *
712 */
713static void
714init_not(struct nvbios_init *init)
715{
716 trace("NOT\n");
717 init->offset += 1;
718 init_exec_inv(init);
719}
720
721/**
722 * INIT_IO_FLAG_CONDITION - opcode 0x39
723 *
724 */
725static void
726init_io_flag_condition(struct nvbios_init *init)
727{
728 struct nouveau_bios *bios = init->bios;
729 u8 cond = nv_ro08(bios, init->offset + 1);
730
731 trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
732 init->offset += 2;
733
734 if (!init_io_flag_condition_met(init, cond))
735 init_exec_set(init, false);
736}
737
738/**
739 * INIT_DP_CONDITION - opcode 0x3a
740 *
741 */
/* Pause execution unless a DisplayPort-related condition holds:
 *   0 - connector is eDP
 * 1/2 - flag bits in the output's DP info table entry
 *   5 - DPCD register 0x0d bit 0
 */
static void
init_dp_condition(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 cond = nv_ro08(bios, init->offset + 1);
	u8 unkn = nv_ro08(bios, init->offset + 2);
	u8 ver, len;
	u16 data;

	trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
	init->offset += 3;

	switch (cond) {
	case 0:
		if (init_conn(init) != DCB_CONNECTOR_eDP)
			init_exec_set(init, false);
		break;
	case 1:
	case 2:
		if ( init->outp &&
		    (data = dp_outp_match(bios, init->outp, &ver, &len))) {
			/* NOTE(review): for ver == 0x40 both byte 5 and
			 * byte 4 are tested; confirm against the DP table
			 * layout that this overlap is intended */
			if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond))
				init_exec_set(init, false);
			if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond))
				init_exec_set(init, false);
			break;
		}

		warn("script needs dp output table data\n");
		break;
	case 5:
		if (!(init_rdauxr(init, 0x0d) & 1))
			init_exec_set(init, false);
		break;
	default:
		warn("unknown dp condition 0x%02x\n", cond);
		break;
	}
}
781
782/**
783 * INIT_IO_MASK_OR - opcode 0x3b
784 *
785 */
786static void
787init_io_mask_or(struct nvbios_init *init)
788{
789 struct nouveau_bios *bios = init->bios;
790 u8 index = nv_ro08(bios, init->offset + 1);
791 u8 or = init_or(init);
792 u8 data;
793
794 trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)", index, or);
795 init->offset += 2;
796
797 data = init_rdvgai(init, 0x03d4, index);
798 init_wrvgai(init, 0x03d4, index, data &= ~(1 << or));
799}
800
801/**
802 * INIT_IO_OR - opcode 0x3c
803 *
804 */
805static void
806init_io_or(struct nvbios_init *init)
807{
808 struct nouveau_bios *bios = init->bios;
809 u8 index = nv_ro08(bios, init->offset + 1);
810 u8 or = init_or(init);
811 u8 data;
812
813 trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)", index, or);
814 init->offset += 2;
815
816 data = init_rdvgai(init, 0x03d4, index);
817 init_wrvgai(init, 0x03d4, index, data | (1 << or));
818}
819
820/**
821 * INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
822 *
823 */
/* For each (iaddr, idata) pair: write idata to the data register, then
 * read-modify-write the control register with (data | idata) under mask.
 */
static void
init_idx_addr_latched(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u32 creg = nv_ro32(bios, init->offset + 1);
	u32 dreg = nv_ro32(bios, init->offset + 5);
	u32 mask = nv_ro32(bios, init->offset + 9);
	u32 data = nv_ro32(bios, init->offset + 13);
	u8 count = nv_ro08(bios, init->offset + 17);

	trace("INDEX_ADDRESS_LATCHED\t"
	      "R[0x%06x] : R[0x%06x]\n\tCTRL &= 0x%08x |= 0x%08x\n",
	      creg, dreg, mask, data);
	init->offset += 18;

	while (count--) {
		u8 iaddr = nv_ro08(bios, init->offset + 0);
		u8 idata = nv_ro08(bios, init->offset + 1);

		trace("\t[0x%02x] = 0x%02x\n", iaddr, idata);
		init->offset += 2;

		init_wr32(init, dreg, idata);
		/* init_mask takes the bits to clear, hence ~mask */
		init_mask(init, creg, ~mask, data | idata);
	}
}
850
851/**
852 * INIT_IO_RESTRICT_PLL2 - opcode 0x4a
853 *
854 */
/* Like IO_RESTRICT_PLL but entries are full 32-bit frequencies (kHz)
 * and there is no io-flag-condition doubling.
 */
static void
init_io_restrict_pll2(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u16 port = nv_ro16(bios, init->offset + 1);
	u8 index = nv_ro08(bios, init->offset + 3);
	u8 mask = nv_ro08(bios, init->offset + 4);
	u8 shift = nv_ro08(bios, init->offset + 5);
	u8 count = nv_ro08(bios, init->offset + 6);
	u32 reg = nv_ro32(bios, init->offset + 7);
	u8 conf, i;

	trace("IO_RESTRICT_PLL2\t"
	      "R[0x%06x] =PLL= ((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) [{\n",
	      reg, port, index, mask, shift);
	init->offset += 11;

	conf = (init_rdvgai(init, port, index) & mask) >> shift;
	for (i = 0; i < count; i++) {
		u32 freq = nv_ro32(bios, init->offset);
		if (i == conf) {
			trace("\t%dkHz *\n", freq);
			init_prog_pll(init, reg, freq);
		} else {
			trace("\t%dkHz\n", freq);
		}
		init->offset += 4;
	}
	trace("}]\n");
}
885
886/**
887 * INIT_PLL2 - opcode 0x4b
888 *
889 */
890static void
891init_pll2(struct nvbios_init *init)
892{
893 struct nouveau_bios *bios = init->bios;
894 u32 reg = nv_ro32(bios, init->offset + 1);
895 u32 freq = nv_ro32(bios, init->offset + 5);
896
897 trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
898 init->offset += 9;
899
900 init_prog_pll(init, reg, freq);
901}
902
903/**
904 * INIT_I2C_BYTE - opcode 0x4c
905 *
906 */
/* Read-modify-write a series of i2c device registers.  The scripted
 * address is in 8-bit form, hence the >> 1 to get the 7-bit address.
 */
static void
init_i2c_byte(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 index = nv_ro08(bios, init->offset + 1);
	u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
	u8 count = nv_ro08(bios, init->offset + 3);

	trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
	init->offset += 4;

	while (count--) {
		u8 reg = nv_ro08(bios, init->offset + 0);
		u8 mask = nv_ro08(bios, init->offset + 1);
		u8 data = nv_ro08(bios, init->offset + 2);
		int val;

		trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data);
		init->offset += 3;

		val = init_rdi2cr(init, index, addr, reg);
		if (val < 0)
			continue;	/* read failed, skip the write */
		init_wri2cr(init, index, addr, reg, (val & mask) | data);
	}
}
933
934/**
935 * INIT_ZM_I2C_BYTE - opcode 0x4d
936 *
937 */
938static void
939init_zm_i2c_byte(struct nvbios_init *init)
940{
941 struct nouveau_bios *bios = init->bios;
942 u8 index = nv_ro08(bios, init->offset + 1);
943 u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
944 u8 count = nv_ro08(bios, init->offset + 3);
945
946 trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
947 init->offset += 4;
948
949 while (count--) {
950 u8 reg = nv_ro08(bios, init->offset + 0);
951 u8 data = nv_ro08(bios, init->offset + 1);
952
953 trace("\t[0x%02x] = 0x%02x\n", reg, data);
954 init->offset += 2;
955
956 init_wri2cr(init, index, addr, reg, data);
957 }
958
959}
960
961/**
962 * INIT_ZM_I2C - opcode 0x4e
963 *
964 */
/* Send a raw block of up to 255 bytes to an i2c device in a single
 * transfer (the buffer is sized 256 because count is a u8).
 */
static void
init_zm_i2c(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 index = nv_ro08(bios, init->offset + 1);
	u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
	u8 count = nv_ro08(bios, init->offset + 3);
	u8 data[256], i;

	trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr);
	init->offset += 4;

	for (i = 0; i < count; i++) {
		data[i] = nv_ro08(bios, init->offset);
		trace("\t0x%02x\n", data[i]);
		init->offset++;
	}

	if (init_exec(init)) {
		struct nouveau_i2c_port *port = init_i2c(init, index);
		struct i2c_msg msg = {
			.addr = addr, .flags = 0, .len = count, .buf = data,
		};
		int ret;

		/* i2c_transfer returns the number of messages sent */
		if (port && (ret = i2c_transfer(&port->adapter, &msg, 1)) != 1)
			warn("i2c wr failed, %d\n", ret);
	}
}
994
995/**
996 * INIT_TMDS - opcode 0x4f
997 *
998 */
/* Read-modify-write a TMDS transmitter register: latch the sub-address
 * (with bit 16 set to trigger a read-back), merge the data, then write
 * the sub-address again to commit.
 */
static void
init_tmds(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 tmds = nv_ro08(bios, init->offset + 1);
	u8 addr = nv_ro08(bios, init->offset + 2);
	u8 mask = nv_ro08(bios, init->offset + 3);
	u8 data = nv_ro08(bios, init->offset + 4);
	u32 reg = init_tmds_reg(init, tmds);

	trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n",
	      tmds, addr, mask, data);
	init->offset += 5;

	/* selector couldn't be resolved; error already logged */
	if (reg == 0)
		return;

	init_wr32(init, reg + 0, addr | 0x00010000);
	init_wr32(init, reg + 4, data | (init_rd32(init, reg + 4) & mask));
	init_wr32(init, reg + 0, addr);
}
1020
1021/**
1022 * INIT_ZM_TMDS_GROUP - opcode 0x50
1023 *
1024 */
/* Write a series of (sub-address, value) pairs to a TMDS transmitter:
 * data first, then the sub-address write that commits it.
 */
static void
init_zm_tmds_group(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 tmds = nv_ro08(bios, init->offset + 1);
	u8 count = nv_ro08(bios, init->offset + 2);
	u32 reg = init_tmds_reg(init, tmds);

	trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds);
	init->offset += 3;

	while (count--) {
		u8 addr = nv_ro08(bios, init->offset + 0);
		u8 data = nv_ro08(bios, init->offset + 1);

		trace("\t[0x%02x] = 0x%02x\n", addr, data);
		init->offset += 2;

		init_wr32(init, reg + 4, data);
		init_wr32(init, reg + 0, addr);
	}
}
1047
1048/**
1049 * INIT_CR_INDEX_ADDRESS_LATCHED - opcode 0x51
1050 *
1051 */
/* Write a run of values through a pair of index/data CRTC registers:
 * addr0 selects an incrementing index starting at 'base', addr1 takes
 * the data; addr0's original value is restored afterwards.
 */
static void
init_cr_idx_adr_latch(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 addr0 = nv_ro08(bios, init->offset + 1);
	u8 addr1 = nv_ro08(bios, init->offset + 2);
	u8 base = nv_ro08(bios, init->offset + 3);
	u8 count = nv_ro08(bios, init->offset + 4);
	u8 save0;

	trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1);
	init->offset += 5;

	save0 = init_rdvgai(init, 0x03d4, addr0);
	while (count--) {
		u8 data = nv_ro08(bios, init->offset);

		trace("\t\t[0x%02x] = 0x%02x\n", base, data);
		init->offset += 1;

		init_wrvgai(init, 0x03d4, addr0, base++);
		init_wrvgai(init, 0x03d4, addr1, data);
	}
	init_wrvgai(init, 0x03d4, addr0, save0);
}
1077
1078/**
1079 * INIT_CR - opcode 0x52
1080 *
1081 */
1082static void
1083init_cr(struct nvbios_init *init)
1084{
1085 struct nouveau_bios *bios = init->bios;
1086 u8 addr = nv_ro08(bios, init->offset + 1);
1087 u8 mask = nv_ro08(bios, init->offset + 2);
1088 u8 data = nv_ro08(bios, init->offset + 3);
1089 u8 val;
1090
1091 trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
1092 init->offset += 4;
1093
1094 val = init_rdvgai(init, 0x03d4, addr) & mask;
1095 init_wrvgai(init, 0x03d4, addr, val | data);
1096}
1097
1098/**
1099 * INIT_ZM_CR - opcode 0x53
1100 *
1101 */
1102static void
1103init_zm_cr(struct nvbios_init *init)
1104{
1105 struct nouveau_bios *bios = init->bios;
1106 u8 addr = nv_ro08(bios, init->offset + 1);
1107 u8 data = nv_ro08(bios, init->offset + 2);
1108
1109 trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr, data);
1110 init->offset += 3;
1111
1112 init_wrvgai(init, 0x03d4, addr, data);
1113}
1114
1115/**
1116 * INIT_ZM_CR_GROUP - opcode 0x54
1117 *
1118 */
1119static void
1120init_zm_cr_group(struct nvbios_init *init)
1121{
1122 struct nouveau_bios *bios = init->bios;
1123 u8 count = nv_ro08(bios, init->offset + 1);
1124
1125 trace("ZM_CR_GROUP\n");
1126 init->offset += 2;
1127
1128 while (count--) {
1129 u8 addr = nv_ro08(bios, init->offset + 0);
1130 u8 data = nv_ro08(bios, init->offset + 1);
1131
1132 trace("\t\tC[0x%02x] = 0x%02x\n", addr, data);
1133 init->offset += 2;
1134
1135 init_wrvgai(init, 0x03d4, addr, data);
1136 }
1137}
1138
1139/**
1140 * INIT_CONDITION_TIME - opcode 0x56
1141 *
1142 */
/* Poll condition-table entry 'cond' until it's met, pausing execution
 * if it never is within the allotted time.
 */
static void
init_condition_time(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 cond = nv_ro08(bios, init->offset + 1);
	u8 retry = nv_ro08(bios, init->offset + 2);
	/* NOTE(review): 'wait' counts 20ms polls, capped at 100 (~2s total);
	 * confirm retry*50 matches the opcode's intended time units */
	u8 wait = min((u16)retry * 50, 100);

	trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry);
	init->offset += 3;

	if (!init_exec(init))
		return;

	while (wait--) {
		if (init_condition_met(init, cond))
			return;
		mdelay(20);
	}

	/* condition never became true; pause further execution */
	init_exec_set(init, false);
}
1165
1166/**
1167 * INIT_LTIME - opcode 0x57
1168 *
1169 */
1170static void
1171init_ltime(struct nvbios_init *init)
1172{
1173 struct nouveau_bios *bios = init->bios;
1174 u16 msec = nv_ro16(bios, init->offset + 1);
1175
1176 trace("LTIME\t0x%04x\n", msec);
1177 init->offset += 3;
1178
1179 if (init_exec(init))
1180 mdelay(msec);
1181}
1182
1183/**
1184 * INIT_ZM_REG_SEQUENCE - opcode 0x58
1185 *
1186 */
1187static void
1188init_zm_reg_sequence(struct nvbios_init *init)
1189{
1190 struct nouveau_bios *bios = init->bios;
1191 u32 base = nv_ro32(bios, init->offset + 1);
1192 u8 count = nv_ro08(bios, init->offset + 5);
1193
1194 trace("ZM_REG_SEQUENCE\t0x%02x\n", count);
1195 init->offset += 6;
1196
1197 while (count--) {
1198 u32 data = nv_ro32(bios, init->offset);
1199
1200 trace("\t\tR[0x%06x] = 0x%08x\n", base, data);
1201 init->offset += 4;
1202
1203 init_wr32(init, base, data);
1204 base += 4;
1205 }
1206}
1207
1208/**
1209 * INIT_SUB_DIRECT - opcode 0x5b
1210 *
1211 */
/* Execute a sub-script at an absolute offset, then resume after this
 * opcode.  Note the offset is only advanced past the opcode (+3) after
 * the sub-script returns, and not at all on a parse error.
 */
static void
init_sub_direct(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u16 addr = nv_ro16(bios, init->offset + 1);
	u16 save;

	trace("SUB_DIRECT\t0x%04x\n", addr);

	if (init_exec(init)) {
		save = init->offset;
		init->offset = addr;
		if (nvbios_exec(init)) {
			error("error parsing sub-table\n");
			return;
		}
		init->offset = save;
	}

	init->offset += 3;
}
1233
1234/**
1235 * INIT_JUMP - opcode 0x5c
1236 *
1237 */
1238static void
1239init_jump(struct nvbios_init *init)
1240{
1241 struct nouveau_bios *bios = init->bios;
1242 u16 offset = nv_ro16(bios, init->offset + 1);
1243
1244 trace("JUMP\t0x%04x\n", offset);
1245 init->offset = offset;
1246}
1247
/**
 * INIT_I2C_IF - opcode 0x5e
 *
 * Reads register 'reg' from i2c device 'addr' on bus 'index'; execution
 * of subsequent opcodes is disabled unless (value & mask) == data.
 */
static void
init_i2c_if(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 index = nv_ro08(bios, init->offset + 1);
	u8  addr = nv_ro08(bios, init->offset + 2);
	u8   reg = nv_ro08(bios, init->offset + 3);
	u8  mask = nv_ro08(bios, init->offset + 4);
	u8  data = nv_ro08(bios, init->offset + 5);
	u8 value;

	trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n",
	      index, addr, reg, mask, data);
	init->offset += 6;
	/* the i2c read must happen even if execution is currently disabled,
	 * hence the temporary force */
	init_exec_force(init, true);

	value = init_rdi2cr(init, index, addr, reg);
	if ((value & mask) != data)
		init_exec_set(init, false);

	init_exec_force(init, false);
}
1274
/**
 * INIT_COPY_NV_REG - opcode 0x5f
 *
 * Read-modify-write of 'dreg' using a shifted/masked/xored copy of 'sreg':
 *   dreg = (dreg & dmask) | (((sreg SHIFT shift) & smask) ^ sxor)
 * A shift operand with bit 7 set encodes a left shift of (0x100 - shift).
 */
static void
init_copy_nv_reg(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u32  sreg = nv_ro32(bios, init->offset + 1);
	u8  shift = nv_ro08(bios, init->offset + 5);
	u32 smask = nv_ro32(bios, init->offset + 6);
	u32  sxor = nv_ro32(bios, init->offset + 10);
	u32  dreg = nv_ro32(bios, init->offset + 14);
	u32 dmask = nv_ro32(bios, init->offset + 18);
	u32 data;

	trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= "
	      "((R[0x%06x] %s 0x%02x) & 0x%08x ^ 0x%08x)\n",
	      dreg, dmask, sreg, (shift & 0x80) ? "<<" : ">>",
	      (shift & 0x80) ? (0x100 - shift) : shift, smask, sxor);
	init->offset += 22;

	/* init_shift() interprets the sign bit of 'shift' as direction */
	data = init_shift(init_rd32(init, sreg), shift);
	init_mask(init, dreg, ~dmask, (data & smask) ^ sxor);
}
1300
1301/**
1302 * INIT_ZM_INDEX_IO - opcode 0x62
1303 *
1304 */
1305static void
1306init_zm_index_io(struct nvbios_init *init)
1307{
1308 struct nouveau_bios *bios = init->bios;
1309 u16 port = nv_ro16(bios, init->offset + 1);
1310 u8 index = nv_ro08(bios, init->offset + 3);
1311 u8 data = nv_ro08(bios, init->offset + 4);
1312
1313 trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data);
1314 init->offset += 5;
1315
1316 init_wrvgai(init, port, index, data);
1317}
1318
/**
 * INIT_COMPUTE_MEM - opcode 0x63
 *
 * Hands off to the devinit subdev's chipset-specific memory
 * initialisation/sizing routine, if one is provided.
 */
static void
init_compute_mem(struct nvbios_init *init)
{
	struct nouveau_devinit *devinit = nouveau_devinit(init->bios);

	trace("COMPUTE_MEM\n");
	init->offset += 1;

	/* forced so the meminit hook itself can perform register accesses
	 * through the init_* helpers */
	init_exec_force(init, true);
	if (init_exec(init) && devinit->meminit)
		devinit->meminit(devinit);
	init_exec_force(init, false);
}
1336
/**
 * INIT_RESET - opcode 0x65
 *
 * Writes data1 then data2 to 'reg' (typically a reset register), with
 * PCI config romshadow bits (0x184c) saved/restored around the sequence.
 * The write/delay ordering is part of the hardware contract — do not
 * reorder.
 */
static void
init_reset(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u32   reg = nv_ro32(bios, init->offset + 1);
	u32 data1 = nv_ro32(bios, init->offset + 5);
	u32 data2 = nv_ro32(bios, init->offset + 9);
	u32 savepci19;

	trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2);
	init->offset += 13;
	init_exec_force(init, true);

	/* clear 0x184c[11:8] while pulsing the reset register */
	savepci19 = init_mask(init, 0x00184c, 0x00000f00, 0x00000000);
	init_wr32(init, reg, data1);
	udelay(10);
	init_wr32(init, reg, data2);
	init_wr32(init, 0x00184c, savepci19);
	init_mask(init, 0x001850, 0x00000001, 0x00000000);

	init_exec_force(init, false);
}
1363
1364/**
1365 * INIT_CONFIGURE_MEM - opcode 0x66
1366 *
1367 */
1368static u16
1369init_configure_mem_clk(struct nvbios_init *init)
1370{
1371 u16 mdata = bmp_mem_init_table(init->bios);
1372 if (mdata)
1373 mdata += (init_rdvgai(init, 0x03d4, 0x3c) >> 4) * 66;
1374 return mdata;
1375}
1376
static void
init_configure_mem(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u16 mdata, sdata;
	u32 addr, data;

	trace("CONFIGURE_MEM\n");
	init->offset += 1;

	/* BMP-era (pre-BIT) VBIOS only */
	if (bios->version.major > 2) {
		init_done(init);
		return;
	}
	init_exec_force(init, true);

	mdata = init_configure_mem_clk(init);
	/* bit 0 of the mem-init entry selects DDR vs SDR timing sequence */
	sdata = bmp_sdr_seq_table(bios);
	if (nv_ro08(bios, mdata) & 0x01)
		sdata = bmp_ddr_seq_table(bios);
	mdata += 6; /* skip to data */

	/* unlock extended SR registers */
	data = init_rdvgai(init, 0x03c4, 0x01);
	init_wrvgai(init, 0x03c4, 0x01, data | 0x20);

	/* the seq table lists target registers (0xffffffff-terminated);
	 * data values come from the mem-init entry, except for the fixed
	 * command registers below */
	while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) {
		switch (addr) {
		case 0x10021c: /* CKE_NORMAL */
		case 0x1002d0: /* CMD_REFRESH */
		case 0x1002d4: /* CMD_PRECHARGE */
			data = 0x00000001;
			break;
		default:
			data = nv_ro32(bios, mdata);
			mdata += 4;
			/* 0xffffffff in the data stream means "skip" */
			if (data == 0xffffffff)
				continue;
			break;
		}

		init_wr32(init, addr, data);
	}

	init_exec_force(init, false);
}
1422
/**
 * INIT_CONFIGURE_CLK - opcode 0x67
 *
 * BMP-era opcode: programs NVPLL and MPLL from the frequencies stored in
 * the strap-selected memory-init table entry (units of 10kHz).
 */
static void
init_configure_clk(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u16 mdata, clock;

	trace("CONFIGURE_CLK\n");
	init->offset += 1;

	/* BMP-era (pre-BIT) VBIOS only */
	if (bios->version.major > 2) {
		init_done(init);
		return;
	}
	init_exec_force(init, true);

	mdata = init_configure_mem_clk(init);

	/* NVPLL */
	clock = nv_ro16(bios, mdata + 4) * 10;
	init_prog_pll(init, 0x680500, clock);

	/* MPLL — doubled for DDR memory (bit 0 of the entry) */
	clock = nv_ro16(bios, mdata + 2) * 10;
	if (nv_ro08(bios, mdata) & 0x01)
		clock *= 2;
	init_prog_pll(init, 0x680504, clock);

	init_exec_force(init, false);
}
1456
/**
 * INIT_CONFIGURE_PREINIT - opcode 0x68
 *
 * BMP-era opcode: latches the memory strap bits from 0x101000 into
 * CR3C, where init_configure_mem_clk() later reads them back.
 */
static void
init_configure_preinit(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u32 strap;

	trace("CONFIGURE_PREINIT\n");
	init->offset += 1;

	/* BMP-era (pre-BIT) VBIOS only */
	if (bios->version.major > 2) {
		init_done(init);
		return;
	}
	init_exec_force(init, true);

	/* pack strap bits: [5:2]<<2 into [7:4], bit 6 into bit 0 */
	strap = init_rd32(init, 0x101000);
	strap = ((strap << 2) & 0xf0) | ((strap & 0x40) >> 6);
	init_wrvgai(init, 0x03d4, 0x3c, strap);

	init_exec_force(init, false);
}
1482
1483/**
1484 * INIT_IO - opcode 0x69
1485 *
1486 */
1487static void
1488init_io(struct nvbios_init *init)
1489{
1490 struct nouveau_bios *bios = init->bios;
1491 u16 port = nv_ro16(bios, init->offset + 1);
1492 u8 mask = nv_ro16(bios, init->offset + 3);
1493 u8 data = nv_ro16(bios, init->offset + 4);
1494 u8 value;
1495
1496 trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data);
1497 init->offset += 5;
1498
1499 /* ummm.. yes.. should really figure out wtf this is and why it's
1500 * needed some day.. it's almost certainly wrong, but, it also
1501 * somehow makes things work...
1502 */
1503 if (nv_device(init->bios)->card_type >= NV_50 &&
1504 port == 0x03c3 && data == 0x01) {
1505 init_mask(init, 0x614100, 0xf0800000, 0x00800000);
1506 init_mask(init, 0x00e18c, 0x00020000, 0x00020000);
1507 init_mask(init, 0x614900, 0xf0800000, 0x00800000);
1508 init_mask(init, 0x000200, 0x40000000, 0x00000000);
1509 mdelay(10);
1510 init_mask(init, 0x00e18c, 0x00020000, 0x00000000);
1511 init_mask(init, 0x000200, 0x40000000, 0x40000000);
1512 init_wr32(init, 0x614100, 0x00800018);
1513 init_wr32(init, 0x614900, 0x00800018);
1514 mdelay(10);
1515 init_wr32(init, 0x614100, 0x10000018);
1516 init_wr32(init, 0x614900, 0x10000018);
1517 return;
1518 }
1519
1520 value = init_rdport(init, port) & mask;
1521 init_wrport(init, port, data | value);
1522}
1523
/**
 * INIT_SUB - opcode 0x6b
 *
 * Executes init script number 'index' (looked up via the script pointer
 * table) as a subroutine, then resumes after this opcode.
 */
static void
init_sub(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 index = nv_ro08(bios, init->offset + 1);
	u16 addr, save;

	trace("SUB\t0x%02x\n", index);

	addr = init_script(bios, index);
	if (addr && init_exec(init)) {
		save = init->offset;
		init->offset = addr;
		/* on parse failure the caller aborts on nvbios_exec()'s
		 * error return, so offset is deliberately not restored */
		if (nvbios_exec(init)) {
			error("error parsing sub-table\n");
			return;
		}
		init->offset = save;
	}

	init->offset += 2;
}
1550
1551/**
1552 * INIT_RAM_CONDITION - opcode 0x6d
1553 *
1554 */
1555static void
1556init_ram_condition(struct nvbios_init *init)
1557{
1558 struct nouveau_bios *bios = init->bios;
1559 u8 mask = nv_ro08(bios, init->offset + 1);
1560 u8 value = nv_ro08(bios, init->offset + 2);
1561
1562 trace("RAM_CONDITION\t"
1563 "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value);
1564 init->offset += 3;
1565
1566 if ((init_rd32(init, 0x100000) & mask) != value)
1567 init_exec_set(init, false);
1568}
1569
1570/**
1571 * INIT_NV_REG - opcode 0x6e
1572 *
1573 */
1574static void
1575init_nv_reg(struct nvbios_init *init)
1576{
1577 struct nouveau_bios *bios = init->bios;
1578 u32 reg = nv_ro32(bios, init->offset + 1);
1579 u32 mask = nv_ro32(bios, init->offset + 5);
1580 u32 data = nv_ro32(bios, init->offset + 9);
1581
1582 trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data);
1583 init->offset += 13;
1584
1585 init_mask(init, reg, ~mask, data);
1586}
1587
/**
 * INIT_MACRO - opcode 0x6f
 *
 * Executes entry 'macro' of the macro table: a single (addr, data)
 * register write per 8-byte entry.
 */
static void
init_macro(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8  macro = nv_ro08(bios, init->offset + 1);
	u16 table;

	trace("MACRO\t0x%02x\n", macro);

	table = init_macro_table(init);
	if (table) {
		u32 addr = nv_ro32(bios, table + (macro * 8) + 0);
		u32 data = nv_ro32(bios, table + (macro * 8) + 4);
		trace("\t\tR[0x%06x] = 0x%08x\n", addr, data);
		init_wr32(init, addr, data);
	}

	init->offset += 2;
}
1611
/**
 * INIT_RESUME - opcode 0x72
 *
 * Re-enables opcode execution after it was disabled by a failed
 * condition opcode.
 */
static void
init_resume(struct nvbios_init *init)
{
	trace("RESUME\n");
	init->offset += 1;
	init_exec_set(init, true);
}
1623
1624/**
1625 * INIT_TIME - opcode 0x74
1626 *
1627 */
1628static void
1629init_time(struct nvbios_init *init)
1630{
1631 struct nouveau_bios *bios = init->bios;
1632 u16 usec = nv_ro16(bios, init->offset + 1);
1633
1634 trace("TIME\t0x%04x\n", usec);
1635 init->offset += 3;
1636
1637 if (init_exec(init)) {
1638 if (usec < 1000)
1639 udelay(usec);
1640 else
1641 mdelay((usec + 900) / 1000);
1642 }
1643}
1644
1645/**
1646 * INIT_CONDITION - opcode 0x75
1647 *
1648 */
1649static void
1650init_condition(struct nvbios_init *init)
1651{
1652 struct nouveau_bios *bios = init->bios;
1653 u8 cond = nv_ro08(bios, init->offset + 1);
1654
1655 trace("CONDITION\t0x%02x\n", cond);
1656 init->offset += 2;
1657
1658 if (!init_condition_met(init, cond))
1659 init_exec_set(init, false);
1660}
1661
1662/**
1663 * INIT_IO_CONDITION - opcode 0x76
1664 *
1665 */
1666static void
1667init_io_condition(struct nvbios_init *init)
1668{
1669 struct nouveau_bios *bios = init->bios;
1670 u8 cond = nv_ro08(bios, init->offset + 1);
1671
1672 trace("IO_CONDITION\t0x%02x\n", cond);
1673 init->offset += 2;
1674
1675 if (!init_io_condition_met(init, cond))
1676 init_exec_set(init, false);
1677}
1678
1679/**
1680 * INIT_INDEX_IO - opcode 0x78
1681 *
1682 */
1683static void
1684init_index_io(struct nvbios_init *init)
1685{
1686 struct nouveau_bios *bios = init->bios;
1687 u16 port = nv_ro16(bios, init->offset + 1);
1688 u8 index = nv_ro16(bios, init->offset + 3);
1689 u8 mask = nv_ro08(bios, init->offset + 4);
1690 u8 data = nv_ro08(bios, init->offset + 5);
1691 u8 value;
1692
1693 trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n",
1694 port, index, mask, data);
1695 init->offset += 6;
1696
1697 value = init_rdvgai(init, port, index) & mask;
1698 init_wrvgai(init, port, index, data | value);
1699}
1700
1701/**
1702 * INIT_PLL - opcode 0x79
1703 *
1704 */
1705static void
1706init_pll(struct nvbios_init *init)
1707{
1708 struct nouveau_bios *bios = init->bios;
1709 u32 reg = nv_ro32(bios, init->offset + 1);
1710 u32 freq = nv_ro16(bios, init->offset + 5) * 10;
1711
1712 trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
1713 init->offset += 7;
1714
1715 init_prog_pll(init, reg, freq);
1716}
1717
/**
 * INIT_ZM_REG - opcode 0x7a
 *
 * Unconditional 32-bit register write.  Writes to the master enable
 * register 0x000200 are forced to keep bit 0 set so the chip is never
 * fully disabled by a script.
 */
static void
init_zm_reg(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u32 addr = nv_ro32(bios, init->offset + 1);
	u32 data = nv_ro32(bios, init->offset + 5);

	trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data);
	init->offset += 9;

	if (addr == 0x000200)
		data |= 0x00000001;

	init_wr32(init, addr, data);
}
1737
/**
 * INIT_RAM_RESTRICT_PLL - opcode 0x87
 *
 * The script carries one candidate PLL frequency per ram configuration
 * group; only the entry matching the board's ram strap is programmed.
 */
static void
init_ram_restrict_pll(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8  type = nv_ro08(bios, init->offset + 1);
	u8 count = init_ram_restrict_group_count(init);
	u8 strap = init_ram_restrict(init);
	u8 cconf;

	trace("RAM_RESTRICT_PLL\t0x%02x\n", type);
	init->offset += 2;

	/* all entries must be consumed to keep the script offset in sync,
	 * even though only one is programmed */
	for (cconf = 0; cconf < count; cconf++) {
		u32 freq = nv_ro32(bios, init->offset);

		if (cconf == strap) {
			trace("%dkHz *\n", freq);
			/* 'type' is a PLL type id, resolved to a register
			 * by init_prog_pll() */
			init_prog_pll(init, type, freq);
		} else {
			trace("%dkHz\n", freq);
		}

		init->offset += 4;
	}
}
1767
1768/**
1769 * INIT_GPIO - opcode 0x8e
1770 *
1771 */
1772static void
1773init_gpio(struct nvbios_init *init)
1774{
1775 struct nouveau_gpio *gpio = nouveau_gpio(init->bios);
1776
1777 trace("GPIO\n");
1778 init->offset += 1;
1779
1780 if (init_exec(init) && gpio && gpio->reset)
1781 gpio->reset(gpio);
1782}
1783
/**
 * INIT_RAM_RESTRICT_ZM_GROUP - opcode 0x8f
 *
 * For each of 'num' registers (addr, addr+incr, ...), the script carries
 * one candidate value per ram configuration group; only the value
 * matching the board's ram strap is written.
 */
static void
init_ram_restrict_zm_reg_group(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u32  addr = nv_ro32(bios, init->offset + 1);
	u8   incr = nv_ro08(bios, init->offset + 5);
	u8    num = nv_ro08(bios, init->offset + 6);
	u8  count = init_ram_restrict_group_count(init);
	u8  index = init_ram_restrict(init);
	u8 i, j;

	trace("RAM_RESTRICT_ZM_REG_GROUP\t"
	      "R[%08x] 0x%02x 0x%02x\n", addr, incr, num);
	init->offset += 7;

	for (i = 0; i < num; i++) {
		trace("\tR[0x%06x] = {\n", addr);
		/* consume all 'count' candidates to keep offset in sync */
		for (j = 0; j < count; j++) {
			u32 data = nv_ro32(bios, init->offset);

			if (j == index) {
				trace("\t\t0x%08x *\n", data);
				init_wr32(init, addr, data);
			} else {
				trace("\t\t0x%08x\n", data);
			}

			init->offset += 4;
		}
		trace("\t}\n");
		addr += incr;
	}
}
1821
1822/**
1823 * INIT_COPY_ZM_REG - opcode 0x90
1824 *
1825 */
1826static void
1827init_copy_zm_reg(struct nvbios_init *init)
1828{
1829 struct nouveau_bios *bios = init->bios;
1830 u32 sreg = nv_ro32(bios, init->offset + 1);
1831 u32 dreg = nv_ro32(bios, init->offset + 5);
1832
1833 trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", sreg, dreg);
1834 init->offset += 9;
1835
1836 init_wr32(init, dreg, init_rd32(init, sreg));
1837}
1838
1839/**
1840 * INIT_ZM_REG_GROUP - opcode 0x91
1841 *
1842 */
1843static void
1844init_zm_reg_group(struct nvbios_init *init)
1845{
1846 struct nouveau_bios *bios = init->bios;
1847 u32 addr = nv_ro32(bios, init->offset + 1);
1848 u8 count = nv_ro08(bios, init->offset + 5);
1849
1850 trace("ZM_REG_GROUP\tR[0x%06x] =\n");
1851 init->offset += 6;
1852
1853 while (count--) {
1854 u32 data = nv_ro32(bios, init->offset);
1855 trace("\t0x%08x\n", data);
1856 init_wr32(init, addr, data);
1857 init->offset += 4;
1858 }
1859}
1860
/**
 * INIT_XLAT - opcode 0x96
 *
 * Reads 'saddr', shifts/masks the value, translates it through xlat
 * table 'index', shifts the result, and merges it into 'daddr':
 *   daddr = (daddr & dmask) | (XLAT[index][(saddr SHIFT sshift) & smask] << shift)
 */
static void
init_xlat(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u32 saddr = nv_ro32(bios, init->offset + 1);
	u8 sshift = nv_ro08(bios, init->offset + 5);
	u8  smask = nv_ro08(bios, init->offset + 6);
	u8  index = nv_ro08(bios, init->offset + 7);
	u32 daddr = nv_ro32(bios, init->offset + 8);
	u32 dmask = nv_ro32(bios, init->offset + 12);
	u8  shift = nv_ro08(bios, init->offset + 16);
	u32 data;

	trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= "
	      "(X%02x((R[0x%06x] %s 0x%02x) & 0x%02x) << 0x%02x)\n",
	      daddr, dmask, index, saddr, (sshift & 0x80) ? "<<" : ">>",
	      (sshift & 0x80) ? (0x100 - sshift) : sshift, smask, shift);
	init->offset += 17;

	/* init_shift() interprets the sign bit of 'sshift' as direction */
	data = init_shift(init_rd32(init, saddr), sshift) & smask;
	data = init_xlat_(init, index, data) << shift;
	init_mask(init, daddr, ~dmask, data);
}
1888
1889/**
1890 * INIT_ZM_MASK_ADD - opcode 0x97
1891 *
1892 */
1893static void
1894init_zm_mask_add(struct nvbios_init *init)
1895{
1896 struct nouveau_bios *bios = init->bios;
1897 u32 addr = nv_ro32(bios, init->offset + 1);
1898 u32 mask = nv_ro32(bios, init->offset + 5);
1899 u32 add = nv_ro32(bios, init->offset + 9);
1900 u32 data;
1901
1902 trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
1903 init->offset += 13;
1904
1905 data = init_rd32(init, addr) & mask;
1906 data |= ((data + add) & ~mask);
1907 init_wr32(init, addr, data);
1908}
1909
/**
 * INIT_AUXCH - opcode 0x98
 *
 * Performs 'count' read-modify-write cycles on the DisplayPort AUX
 * channel register 'addr', one (mask, data) byte pair per cycle.
 */
static void
init_auxch(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u32  addr = nv_ro32(bios, init->offset + 1);
	u8  count = nv_ro08(bios, init->offset + 5);

	trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
	init->offset += 6;

	while (count--) {
		u8 mask = nv_ro08(bios, init->offset + 0);
		u8 data = nv_ro08(bios, init->offset + 1);
		trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
		/* 'mask' is reused to hold the masked read-back value */
		mask = init_rdauxr(init, addr) & mask;
		init_wrauxr(init, addr, mask | data);
		init->offset += 2;
	}
}
1933
1934/**
1935 * INIT_AUXCH - opcode 0x99
1936 *
1937 */
1938static void
1939init_zm_auxch(struct nvbios_init *init)
1940{
1941 struct nouveau_bios *bios = init->bios;
1942 u32 addr = nv_ro32(bios, init->offset + 1);
1943 u8 count = nv_ro08(bios, init->offset + 5);
1944
1945 trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
1946 init->offset += 6;
1947
1948 while (count--) {
1949 u8 data = nv_ro08(bios, init->offset + 0);
1950 trace("\tAUX[0x%08x] = 0x%02x\n", addr, data);
1951 init_wrauxr(init, addr, data);
1952 init->offset += 1;
1953 }
1954}
1955
/**
 * INIT_I2C_LONG_IF - opcode 0x9a
 *
 * Reads one byte from a 16-bit register address on an i2c device;
 * execution of subsequent opcodes is disabled unless
 * (value & mask) == data, or the i2c transfer fails.
 */
static void
init_i2c_long_if(struct nvbios_init *init)
{
	struct nouveau_bios *bios = init->bios;
	u8 index = nv_ro08(bios, init->offset + 1);
	/* script stores an 8-bit (shifted) i2c address */
	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
	u8 reglo = nv_ro08(bios, init->offset + 3);
	u8 reghi = nv_ro08(bios, init->offset + 4);
	u8  mask = nv_ro08(bios, init->offset + 5);
	u8  data = nv_ro08(bios, init->offset + 6);
	struct nouveau_i2c_port *port;

	trace("I2C_LONG_IF\t"
	      "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
	      index, addr, reglo, reghi, mask, data);
	init->offset += 7;

	port = init_i2c(init, index);
	if (port) {
		/* write 16-bit register address (high byte first), then
		 * read back a single byte */
		u8 i[2] = { reghi, reglo };
		u8 o[1] = {};
		struct i2c_msg msg[] = {
			{ .addr = addr, .flags = 0, .len = 2, .buf = i },
			{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = o }
		};
		int ret;

		ret = i2c_transfer(&port->adapter, msg, 2);
		if (ret == 2 && ((o[0] & mask) == data))
			return;
	}

	init_exec_set(init, false);
}
1994
/* Dispatch table for the VBIOS init-script interpreter, indexed directly
 * by opcode byte.  Unlisted opcodes have a NULL .exec and abort script
 * execution in nvbios_exec().  Each handler is responsible for advancing
 * init->offset past its own operands.
 */
static struct nvbios_init_opcode {
	void (*exec)(struct nvbios_init *);
} init_opcode[] = {
	[0x32] = { init_io_restrict_prog },
	[0x33] = { init_repeat },
	[0x34] = { init_io_restrict_pll },
	[0x36] = { init_end_repeat },
	[0x37] = { init_copy },
	[0x38] = { init_not },
	[0x39] = { init_io_flag_condition },
	[0x3a] = { init_dp_condition },
	[0x3b] = { init_io_mask_or },
	[0x3c] = { init_io_or },
	[0x49] = { init_idx_addr_latched },
	[0x4a] = { init_io_restrict_pll2 },
	[0x4b] = { init_pll2 },
	[0x4c] = { init_i2c_byte },
	[0x4d] = { init_zm_i2c_byte },
	[0x4e] = { init_zm_i2c },
	[0x4f] = { init_tmds },
	[0x50] = { init_zm_tmds_group },
	[0x51] = { init_cr_idx_adr_latch },
	[0x52] = { init_cr },
	[0x53] = { init_zm_cr },
	[0x54] = { init_zm_cr_group },
	[0x56] = { init_condition_time },
	[0x57] = { init_ltime },
	[0x58] = { init_zm_reg_sequence },
	[0x5b] = { init_sub_direct },
	[0x5c] = { init_jump },
	[0x5e] = { init_i2c_if },
	[0x5f] = { init_copy_nv_reg },
	[0x62] = { init_zm_index_io },
	[0x63] = { init_compute_mem },
	[0x65] = { init_reset },
	[0x66] = { init_configure_mem },
	[0x67] = { init_configure_clk },
	[0x68] = { init_configure_preinit },
	[0x69] = { init_io },
	[0x6b] = { init_sub },
	[0x6d] = { init_ram_condition },
	[0x6e] = { init_nv_reg },
	[0x6f] = { init_macro },
	[0x71] = { init_done },
	[0x72] = { init_resume },
	[0x74] = { init_time },
	[0x75] = { init_condition },
	[0x76] = { init_io_condition },
	[0x78] = { init_index_io },
	[0x79] = { init_pll },
	[0x7a] = { init_zm_reg },
	[0x87] = { init_ram_restrict_pll },
	[0x8c] = { init_reserved },
	[0x8d] = { init_reserved },
	[0x8e] = { init_gpio },
	[0x8f] = { init_ram_restrict_zm_reg_group },
	[0x90] = { init_copy_zm_reg },
	[0x91] = { init_zm_reg_group },
	[0x92] = { init_reserved },
	[0x96] = { init_xlat },
	[0x97] = { init_zm_mask_add },
	[0x98] = { init_auxch },
	[0x99] = { init_zm_auxch },
	[0x9a] = { init_i2c_long_if },
};
2060
2061#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
2062
2063int
2064nvbios_exec(struct nvbios_init *init)
2065{
2066 init->nested++;
2067 while (init->offset) {
2068 u8 opcode = nv_ro08(init->bios, init->offset);
2069 if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
2070 error("unknown opcode 0x%02x\n", opcode);
2071 return -EINVAL;
2072 }
2073
2074 init_opcode[opcode].exec(init);
2075 }
2076 init->nested--;
2077 return 0;
2078}
2079
2080int
2081nvbios_init(struct nouveau_subdev *subdev, bool execute)
2082{
2083 struct nouveau_bios *bios = nouveau_bios(subdev);
2084 int ret = 0;
2085 int i = -1;
2086 u16 data;
2087
2088 if (execute)
2089 nv_info(bios, "running init tables\n");
2090 while (!ret && (data = (init_script(bios, ++i)))) {
2091 struct nvbios_init init = {
2092 .subdev = subdev,
2093 .bios = bios,
2094 .offset = data,
2095 .outp = NULL,
2096 .crtc = -1,
2097 .execute = execute ? 1 : 0,
2098 };
2099
2100 ret = nvbios_exec(&init);
2101 }
2102
2103 /* the vbios parser will run this right after the normal init
2104 * tables, whereas the binary driver appears to run it later.
2105 */
2106 if (!ret && (data = init_unknown_script(bios))) {
2107 struct nvbios_init init = {
2108 .subdev = subdev,
2109 .bios = bios,
2110 .offset = data,
2111 .outp = NULL,
2112 .crtc = -1,
2113 .execute = execute ? 1 : 0,
2114 };
2115
2116 ret = nvbios_exec(&init);
2117 }
2118
2119 return 0;
2120}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c
new file mode 100644
index 000000000000..2610b11a99b3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/mxm.h>
28
/* Locates the MXM data via the BIT 'x' entry.  On success returns the
 * table offset and fills *ver/*hdr; returns 0x0000 if the entry is
 * missing or its version/size is not understood (only v1 with a header
 * of at least 3 bytes is supported).
 */
u16
mxm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr)
{
	struct bit_entry x;

	if (bit_entry(bios, 'x', &x)) {
		nv_debug(bios, "BIT 'x' table not present\n");
		return 0x0000;
	}

	*ver = x.version;
	*hdr = x.length;
	if (*ver != 1 || *hdr < 3) {
		nv_warn(bios, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
		return 0x0000;
	}

	return x.offset;
}
48
49/* These map MXM v2.x digital connection values to the appropriate SOR/link,
50 * hopefully they're correct for all boards within the same chipset...
51 *
52 * MXM v3.x VBIOS are nicer and provide pointers to these tables.
53 */
/* Per-chipset fallback tables indexed by MXM v2.x digital connection
 * value; each entry packs SOR and link selection into one byte
 * (0x00 == unknown/unused).
 */
static u8 nv84_sor_map[16] = {
	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

static u8 nv92_sor_map[16] = {
	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

static u8 nv94_sor_map[16] = {
	0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
	0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
};

static u8 nv98_sor_map[16] = {
	0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
73
/* Translates an MXM digital connection value to an SOR/link byte.
 * Prefers the v3.x VBIOS-provided sor map (MXM header >= 6 bytes, map
 * pointer at +4); otherwise falls back to the per-chipset tables above.
 * Returns 0x00 if no mapping is known.
 */
u8
mxm_sor_map(struct nouveau_bios *bios, u8 conn)
{
	u8  ver, hdr;
	u16 mxm = mxm_table(bios, &ver, &hdr);
	if (mxm && hdr >= 6) {
		u16 map = nv_ro16(bios, mxm + 4);
		if (map) {
			ver = nv_ro08(bios, map);
			if (ver == 0x10) {
				/* map layout: +1 header size, +3 entry
				 * count, entries follow the header */
				if (conn < nv_ro08(bios, map + 3)) {
					map += nv_ro08(bios, map + 1);
					map += conn;
					return nv_ro08(bios, map);
				}

				return 0x00;
			}

			nv_warn(bios, "unknown sor map v%02x\n", ver);
		}
	}

	if (bios->version.chip == 0x84 || bios->version.chip == 0x86)
		return nv84_sor_map[conn];
	if (bios->version.chip == 0x92)
		return nv92_sor_map[conn];
	if (bios->version.chip == 0x94 || bios->version.chip == 0x96)
		return nv94_sor_map[conn];
	if (bios->version.chip == 0x98)
		return nv98_sor_map[conn];

	nv_warn(bios, "missing sor map\n");
	return 0x00;
}
109
/* Translates an MXM DDC port value to a DCB i2c index byte.  Uses the
 * v3.x VBIOS-provided ddc map when present (MXM header >= 8 bytes, map
 * pointer at +6); for v2.x the port is used directly as the i2c index.
 */
u8
mxm_ddc_map(struct nouveau_bios *bios, u8 port)
{
	u8  ver, hdr;
	u16 mxm = mxm_table(bios, &ver, &hdr);
	if (mxm && hdr >= 8) {
		u16 map = nv_ro16(bios, mxm + 6);
		if (map) {
			ver = nv_ro08(bios, map);
			if (ver == 0x10) {
				/* map layout: +1 header size, +3 entry
				 * count, entries follow the header */
				if (port < nv_ro08(bios, map + 3)) {
					map += nv_ro08(bios, map + 1);
					map += port;
					return nv_ro08(bios, map);
				}

				return 0x00;
			}

			nv_warn(bios, "unknown ddc map v%02x\n", ver);
		}
	}

	/* v2.x: directly write port as dcb i2cidx */
	return (port << 4) | port;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
new file mode 100644
index 000000000000..bcbb056c2887
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/perf.h>
28
/* Locates the performance table, first via BIT 'P' (v1/v2), then via a
 * BMP pointer which, when present, overrides the BIT result.  Note the
 * BMP variant stores header-size and version bytes in the opposite
 * order to the BIT variant.
 *
 * NOTE(review): *cnt and *len are never written here — presumably
 * reserved for future header parsing; callers must pre-initialise them.
 */
static u16
perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct bit_entry bit_P;
	u16 perf = 0x0000;

	if (!bit_entry(bios, 'P', &bit_P)) {
		if (bit_P.version <= 2) {
			perf = nv_ro16(bios, bit_P.offset + 0);
			if (perf) {
				*ver = nv_ro08(bios, perf + 0);
				*hdr = nv_ro08(bios, perf + 1);
			}
		} else
			nv_error(bios, "unknown offset for perf in BIT P %d\n",
				 bit_P.version);
	}

	if (bios->bmp_offset) {
		/* BMP 2.5+ carries its own perf table pointer */
		if (nv_ro08(bios, bios->bmp_offset + 6) >= 0x25) {
			perf = nv_ro16(bios, bios->bmp_offset + 0x94);
			if (perf) {
				*hdr = nv_ro08(bios, perf + 0);
				*ver = nv_ro08(bios, perf + 1);
			}
		}
	}

	return perf;
}
59
60int
61nvbios_perf_fan_parse(struct nouveau_bios *bios,
62 struct nvbios_perf_fan *fan)
63{
64 u8 ver = 0, hdr = 0, cnt = 0, len = 0;
65 u16 perf = perf_table(bios, &ver, &hdr, &cnt, &len);
66 if (!perf)
67 return -ENODEV;
68
69 if (ver >= 0x20 && ver < 0x40 && hdr > 6)
70 fan->pwm_divisor = nv_ro16(bios, perf + 6);
71 else
72 fan->pwm_divisor = 0;
73
74 return 0;
75}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
new file mode 100644
index 000000000000..5e5f4cddae3c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -0,0 +1,417 @@
1/*
2 * Copyright 2005-2006 Erik Waling
3 * Copyright 2006 Stephane Marchesin
4 * Copyright 2007-2009 Stuart Bennett
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
21 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include <subdev/vga.h>
26#include <subdev/bios.h>
27#include <subdev/bios/bit.h>
28#include <subdev/bios/bmp.h>
29#include <subdev/bios/pll.h>
30
/* Associates a PLL type with its control register address, for chipsets
 * whose VBIOS limits table (pre-3.0) does not map registers itself.
 */
struct pll_mapping {
	u8 type;
	u32 reg;
};
35
/* nv04-nv3x: PLL control registers in the 0x6805xx range;
 * zero-terminated */
static struct pll_mapping
nv04_pll_mapping[] = {
	{ PLL_CORE  , 0x680500 },
	{ PLL_MEMORY, 0x680504 },
	{ PLL_VPLL0 , 0x680508 },
	{ PLL_VPLL1 , 0x680520 },
	{}
};
44
/* nv40: core/memory PLLs moved to the 0x0040xx range, video PLLs stay at
 * their nv04 addresses; zero-terminated */
static struct pll_mapping
nv40_pll_mapping[] = {
	{ PLL_CORE  , 0x004000 },
	{ PLL_MEMORY, 0x004020 },
	{ PLL_VPLL0 , 0x680508 },
	{ PLL_VPLL1 , 0x680520 },
	{}
};
53
/* nv50 (chipset 0x50 only): adds shader and several as-yet-unidentified
 * PLLs; video PLLs move to 0x6141xx/0x6149xx; zero-terminated */
static struct pll_mapping
nv50_pll_mapping[] = {
	{ PLL_CORE  , 0x004028 },
	{ PLL_SHADER, 0x004020 },
	{ PLL_UNK03 , 0x004000 },
	{ PLL_MEMORY, 0x004008 },
	{ PLL_UNK40 , 0x00e810 },
	{ PLL_UNK41 , 0x00e818 },
	{ PLL_UNK42 , 0x00e824 },
	{ PLL_VPLL0 , 0x614100 },
	{ PLL_VPLL1 , 0x614900 },
	{}
};
67
/* nv84+ (rest of the nv50 family below 0xa3, plus 0xaa/0xac): adds the
 * video-decoder PLL, drops some of the unknowns; zero-terminated */
static struct pll_mapping
nv84_pll_mapping[] = {
	{ PLL_CORE  , 0x004028 },
	{ PLL_SHADER, 0x004020 },
	{ PLL_MEMORY, 0x004008 },
	{ PLL_VDEC  , 0x004030 },
	{ PLL_UNK41 , 0x00e818 },
	{ PLL_VPLL0 , 0x614100 },
	{ PLL_VPLL1 , 0x614900 },
	{}
};
79
/* Locate the VBIOS PLL limits table.
 *
 * Preference order: the BIT 'C' (clock) table pointer at offset 8, then
 * the BMP pointer at offset 142 for BMP 5.24+ images.  On success,
 * *ver/*hdr/*cnt/*len describe the table layout; the BMP variant carries
 * no layout header of its own, so fixed values are reported for it.
 *
 * Returns the table offset, or 0x0000 with *ver = 0 when absent.
 * NOTE(review): on the not-found path *hdr/*cnt/*len are left untouched;
 * callers appear to gate on *ver and the return value -- confirm before
 * relying on those outputs.
 */
static u16
pll_limits_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	struct bit_entry bit_C;

	if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
		u16 data = nv_ro16(bios, bit_C.offset + 8);
		if (data) {
			*ver = nv_ro08(bios, data + 0);
			*hdr = nv_ro08(bios, data + 1);
			*len = nv_ro08(bios, data + 2);
			*cnt = nv_ro08(bios, data + 3);
			return data;
		}
	}

	if (bmp_version(bios) >= 0x0524) {
		u16 data = nv_ro16(bios, bios->bmp_offset + 142);
		if (data) {
			*ver = nv_ro08(bios, data + 0);
			/* BMP table: single fixed-size entry, no header */
			*hdr = 1;
			*cnt = 1;
			*len = 0x18;
			return data;
		}
	}

	*ver = 0x00;
	return 0x0000;
}
110
111static struct pll_mapping *
112pll_map(struct nouveau_bios *bios)
113{
114 switch (nv_device(bios)->card_type) {
115 case NV_04:
116 case NV_10:
117 case NV_20:
118 case NV_30:
119 return nv04_pll_mapping;
120 break;
121 case NV_40:
122 return nv40_pll_mapping;
123 case NV_50:
124 if (nv_device(bios)->chipset == 0x50)
125 return nv50_pll_mapping;
126 else
127 if (nv_device(bios)->chipset < 0xa3 ||
128 nv_device(bios)->chipset == 0xaa ||
129 nv_device(bios)->chipset == 0xac)
130 return nv84_pll_mapping;
131 default:
132 return NULL;
133 }
134}
135
/* Find the limits-table entry for the PLL controlled by register 'reg'.
 *
 * 3.0+ tables store the register address in each entry and are matched
 * directly.  2.x tables carry a register column and are scanned per the
 * static chipset map; older tables have a single entry shared by every
 * mapped PLL.  On success the PLL type is stored in *type.  Returns the
 * entry offset, or 0x0000 when nothing matches.
 */
static u16
pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
{
	struct pll_mapping *map;
	u8 hdr, cnt;
	u16 data;

	data = pll_limits_table(bios, ver, &hdr, &cnt, len);
	if (data && *ver >= 0x30) {
		/* 3.0+: register address lives at entry offset 3 */
		data += hdr;
		while (cnt--) {
			if (nv_ro32(bios, data + 3) == reg) {
				*type = nv_ro08(bios, data + 0);
				return data;
			}
			data += *len;
		}
		return 0x0000;
	}

	map = pll_map(bios);
	while (map->reg) {
		if (map->reg == reg && *ver >= 0x20) {
			/* 2.x: scan entries for the mapped register; when
			 * none matches, fall back to the first entry.
			 * NOTE(review): 'addr' retains the start of the
			 * entry area -- presumably a deliberate default,
			 * confirm against the table spec. */
			u16 addr = (data += hdr);
			while (cnt--) {
				if (nv_ro32(bios, data) == map->reg) {
					*type = map->type;
					return data;
				}
				data += *len;
			}
			return addr;
		} else
		if (map->reg == reg) {
			/* pre-2.0: single shared entry after version byte */
			*type = map->type;
			return data + 1;
		}
		map++;
	}

	return 0x0000;
}
178
/* Find the limits-table entry for the PLL of the given 'type', the dual
 * of pll_map_reg(): matches on the type byte for 3.0+ tables, otherwise
 * resolves type -> register through the static chipset map and scans (or
 * defaults) like pll_map_reg().  On success the control register address
 * is stored in *reg.  Returns the entry offset, or 0x0000.
 */
static u16
pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
{
	struct pll_mapping *map;
	u8 hdr, cnt;
	u16 data;

	data = pll_limits_table(bios, ver, &hdr, &cnt, len);
	if (data && *ver >= 0x30) {
		/* 3.0+: type byte at entry offset 0, register at offset 3 */
		data += hdr;
		while (cnt--) {
			if (nv_ro08(bios, data + 0) == type) {
				*reg = nv_ro32(bios, data + 3);
				return data;
			}
			data += *len;
		}
		return 0x0000;
	}

	map = pll_map(bios);
	while (map->reg) {
		if (map->type == type && *ver >= 0x20) {
			/* 2.x: match the mapped register against the table's
			 * register column; fall back to the first entry.
			 * NOTE(review): same first-entry default as
			 * pll_map_reg() -- confirm intent. */
			u16 addr = (data += hdr);
			while (cnt--) {
				if (nv_ro32(bios, data) == map->reg) {
					*reg = map->reg;
					return data;
				}
				data += *len;
			}
			return addr;
		} else
		if (map->type == type) {
			/* pre-2.0: single shared entry after version byte */
			*reg = map->reg;
			return data + 1;
		}
		map++;
	}

	return 0x0000;
}
221
/* Parse the VBIOS PLL limits for one PLL, identified either by a type
 * enum (<= PLL_MAX) or by a raw register address, filling *info with the
 * VCO frequency windows and N/M/P divider bounds.
 *
 * Handles limits-table versions 1.0 through 4.0, then patches in the
 * reference clock and -- for BIOSes that predate or ship an empty limits
 * table -- hardcoded defaults derived from the BMP and chip revision.
 *
 * Returns 0 on success, -ENOENT when a table exists but lacks an entry
 * for this PLL, -EINVAL on an unknown table version.
 */
int
nvbios_pll_parse(struct nouveau_bios *bios, u32 type, struct nvbios_pll *info)
{
	u8 ver, len;
	u32 reg = type;
	u16 data;

	/* values above PLL_MAX are register addresses, not type enums */
	if (type > PLL_MAX) {
		reg = type;
		data = pll_map_reg(bios, reg, &type, &ver, &len);
	} else {
		data = pll_map_type(bios, type, &reg, &ver, &len);
	}

	/* a table exists (ver != 0) but has no entry for this pll */
	if (ver && !data)
		return -ENOENT;

	memset(info, 0, sizeof(*info));
	info->type = type;
	info->reg = reg;

	switch (ver) {
	case 0x00:
		/* no limits table; the fixups below supply defaults */
		break;
	case 0x10:
	case 0x11:
		/* v1.x: frequencies stored directly in Hz-scale units */
		info->vco1.min_freq = nv_ro32(bios, data + 0);
		info->vco1.max_freq = nv_ro32(bios, data + 4);
		info->vco2.min_freq = nv_ro32(bios, data + 8);
		info->vco2.max_freq = nv_ro32(bios, data + 12);
		info->vco1.min_inputfreq = nv_ro32(bios, data + 16);
		info->vco2.min_inputfreq = nv_ro32(bios, data + 20);
		info->vco1.max_inputfreq = INT_MAX;
		info->vco2.max_inputfreq = INT_MAX;

		info->max_p = 0x7;
		info->max_p_usable = 0x6;

		/* these values taken from nv30/31/36 */
		switch (bios->version.chip) {
		case 0x36:
			info->vco1.min_n = 0x5;
			break;
		default:
			info->vco1.min_n = 0x1;
			break;
		}
		info->vco1.max_n = 0xff;
		info->vco1.min_m = 0x1;
		info->vco1.max_m = 0xd;

		/*
		 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
		 * table version (apart from nv35)), N2 is compared to
		 * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
		 * save a comparison
		 */
		info->vco2.min_n = 0x4;
		switch (bios->version.chip) {
		case 0x30:
		case 0x35:
			info->vco2.max_n = 0x1f;
			break;
		default:
			info->vco2.max_n = 0x28;
			break;
		}
		info->vco2.min_m = 0x1;
		info->vco2.max_m = 0x4;
		break;
	case 0x20:
	case 0x21:
		/* v2.x: frequencies stored in kHz, converted here to Hz */
		info->vco1.min_freq = nv_ro16(bios, data + 4) * 1000;
		info->vco1.max_freq = nv_ro16(bios, data + 6) * 1000;
		info->vco2.min_freq = nv_ro16(bios, data + 8) * 1000;
		info->vco2.max_freq = nv_ro16(bios, data + 10) * 1000;
		info->vco1.min_inputfreq = nv_ro16(bios, data + 12) * 1000;
		info->vco2.min_inputfreq = nv_ro16(bios, data + 14) * 1000;
		info->vco1.max_inputfreq = nv_ro16(bios, data + 16) * 1000;
		info->vco2.max_inputfreq = nv_ro16(bios, data + 18) * 1000;
		info->vco1.min_n = nv_ro08(bios, data + 20);
		info->vco1.max_n = nv_ro08(bios, data + 21);
		info->vco1.min_m = nv_ro08(bios, data + 22);
		info->vco1.max_m = nv_ro08(bios, data + 23);
		info->vco2.min_n = nv_ro08(bios, data + 24);
		info->vco2.max_n = nv_ro08(bios, data + 25);
		info->vco2.min_m = nv_ro08(bios, data + 26);
		info->vco2.max_m = nv_ro08(bios, data + 27);

		info->max_p = nv_ro08(bios, data + 29);
		info->max_p_usable = info->max_p;
		/* pre-nv60 chips can't actually use the full P range */
		if (bios->version.chip < 0x60)
			info->max_p_usable = 0x6;
		info->bias_p = nv_ro08(bios, data + 30);

		/* refclk field only present in longer entries */
		if (len > 0x22)
			info->refclk = nv_ro32(bios, data + 31);
		break;
	case 0x30:
		/* v3.0: entry holds a pointer to the actual record */
		data = nv_ro16(bios, data + 1);

		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
		info->vco2.min_freq = nv_ro16(bios, data + 4) * 1000;
		info->vco2.max_freq = nv_ro16(bios, data + 6) * 1000;
		info->vco1.min_inputfreq = nv_ro16(bios, data + 8) * 1000;
		info->vco2.min_inputfreq = nv_ro16(bios, data + 10) * 1000;
		info->vco1.max_inputfreq = nv_ro16(bios, data + 12) * 1000;
		info->vco2.max_inputfreq = nv_ro16(bios, data + 14) * 1000;
		info->vco1.min_n = nv_ro08(bios, data + 16);
		info->vco1.max_n = nv_ro08(bios, data + 17);
		info->vco1.min_m = nv_ro08(bios, data + 18);
		info->vco1.max_m = nv_ro08(bios, data + 19);
		info->vco2.min_n = nv_ro08(bios, data + 20);
		info->vco2.max_n = nv_ro08(bios, data + 21);
		info->vco2.min_m = nv_ro08(bios, data + 22);
		info->vco2.max_m = nv_ro08(bios, data + 23);
		info->max_p_usable = info->max_p = nv_ro08(bios, data + 25);
		info->bias_p = nv_ro08(bios, data + 27);
		info->refclk = nv_ro32(bios, data + 28);
		break;
	case 0x40:
		/* v4.0: single-stage PLLs only; refclk read from the entry
		 * before following the record pointer */
		info->refclk = nv_ro16(bios, data + 9) * 1000;
		data = nv_ro16(bios, data + 1);

		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
		info->vco1.min_inputfreq = nv_ro16(bios, data + 4) * 1000;
		info->vco1.max_inputfreq = nv_ro16(bios, data + 6) * 1000;
		info->vco1.min_m = nv_ro08(bios, data + 8);
		info->vco1.max_m = nv_ro08(bios, data + 9);
		info->vco1.min_n = nv_ro08(bios, data + 10);
		info->vco1.max_n = nv_ro08(bios, data + 11);
		info->min_p = nv_ro08(bios, data + 12);
		info->max_p = nv_ro08(bios, data + 13);
		break;
	default:
		nv_error(bios, "unknown pll limits version 0x%02x\n", ver);
		return -EINVAL;
	}

	/* default the reference clock to the crystal; nv11 (chip 0x51) can
	 * route an alternate reference to the vplls via PRAMDAC_SEL_CLK */
	if (!info->refclk) {
		info->refclk = nv_device(bios)->crystal;
		if (bios->version.chip == 0x51) {
			u32 sel_clk = nv_rd32(bios, 0x680524);
			if ((info->reg == 0x680508 && sel_clk & 0x20) ||
			    (info->reg == 0x680520 && sel_clk & 0x80)) {
				if (nv_rdvgac(bios, 0, 0x27) < 0xa3)
					info->refclk = 200000;
				else
					info->refclk = 25000;
			}
		}
	}

	/*
	 * By now any valid limit table ought to have set a max frequency for
	 * vco1, so if it's zero it's either a pre limit table bios, or one
	 * with an empty limit table (seen on nv18)
	 */
	if (!info->vco1.max_freq) {
		info->vco1.max_freq = nv_ro32(bios, bios->bmp_offset + 67);
		info->vco1.min_freq = nv_ro32(bios, bios->bmp_offset + 71);
		/* pre-5.06 BMPs don't carry these fields; use fixed limits */
		if (bmp_version(bios) < 0x0506) {
			info->vco1.max_freq = 256000;
			info->vco1.min_freq = 128000;
		}

		info->vco1.min_inputfreq = 0;
		info->vco1.max_inputfreq = INT_MAX;
		info->vco1.min_n = 0x1;
		info->vco1.max_n = 0xff;
		info->vco1.min_m = 0x1;

		if (nv_device(bios)->crystal == 13500) {
			/* nv05 does this, nv11 doesn't, nv10 unknown */
			if (bios->version.chip < 0x11)
				info->vco1.min_m = 0x7;
			info->vco1.max_m = 0xd;
		} else {
			if (bios->version.chip < 0x11)
				info->vco1.min_m = 0x8;
			info->vco1.max_m = 0xe;
		}

		if (bios->version.chip < 0x17 ||
		    bios->version.chip == 0x1a ||
		    bios->version.chip == 0x20)
			info->max_p = 4;
		else
			info->max_p = 5;
		info->max_p_usable = info->max_p;
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
new file mode 100644
index 000000000000..862a08a2ae27
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/therm.h>
28
29static u16
30therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
31{
32 struct bit_entry bit_P;
33 u16 therm = 0;
34
35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version == 1)
37 therm = nv_ro16(bios, bit_P.offset + 12);
38 else if (bit_P.version == 2)
39 therm = nv_ro16(bios, bit_P.offset + 16);
40 else
41 nv_error(bios,
42 "unknown offset for thermal in BIT P %d\n",
43 bit_P.version);
44 }
45
46 /* exit now if we haven't found the thermal table */
47 if (!therm)
48 return 0x0000;
49
50 *ver = nv_ro08(bios, therm + 0);
51 *hdr = nv_ro08(bios, therm + 1);
52 *len = nv_ro08(bios, therm + 2);
53 *cnt = nv_ro08(bios, therm + 3);
54
55 return therm + nv_ro08(bios, therm + 1);
56}
57
58u16
59nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
60{
61 u8 hdr, cnt;
62 u16 therm = therm_table(bios, ver, &hdr, len, &cnt);
63 if (therm && idx < cnt)
64 return therm + idx * *len;
65 return 0x0000;
66}
67
/* Parse core-domain sensor calibration and temperature thresholds from
 * the VBIOS thermal table into *sensor.
 *
 * Entries are (id byte, 16-bit value) pairs.  Id 0x0 selects a threshold
 * section (a positive value marks a non-core section, which aborts
 * parsing); id 0x01 opens the next sensor section.  Only section 0 of
 * each kind is consumed.  Threshold values pack the temperature in bits
 * 4..11 and hysteresis in bits 0..3.
 *
 * Returns 0, or -EINVAL for any domain other than core.
 */
int
nvbios_therm_sensor_parse(struct nouveau_bios *bios,
			  enum nvbios_therm_domain domain,
			  struct nvbios_therm_sensor *sensor)
{
	s8 thrs_section, sensor_section, offset;
	u8 ver, len, i;
	u16 entry;

	/* we only support the core domain for now */
	if (domain != NVBIOS_THERM_DOMAIN_CORE)
		return -EINVAL;

	/* Read the entries from the table */
	thrs_section = 0;
	sensor_section = -1;
	i = 0;
	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
		s16 value = nv_ro16(bios, entry + 1);

		switch (nv_ro08(bios, entry + 0)) {
		case 0x0:
			/* threshold section marker */
			thrs_section = value;
			if (value > 0)
				return 0; /* we do not try to support ambient */
			break;
		case 0x01:
			/* new sensor section; byte 2 is the calibration
			 * offset in half-degree steps */
			sensor_section++;
			if (sensor_section == 0) {
				offset = ((s8) nv_ro08(bios, entry + 2)) / 2;
				sensor->offset_constant = offset;
			}
			break;

		case 0x04:
			/* critical temperature threshold */
			if (thrs_section == 0) {
				sensor->thrs_critical.temp = (value & 0xff0) >> 4;
				sensor->thrs_critical.hysteresis = value & 0xf;
			}
			break;

		case 0x07:
			/* downclock temperature threshold */
			if (thrs_section == 0) {
				sensor->thrs_down_clock.temp = (value & 0xff0) >> 4;
				sensor->thrs_down_clock.hysteresis = value & 0xf;
			}
			break;

		case 0x08:
			/* fan-boost temperature threshold */
			if (thrs_section == 0) {
				sensor->thrs_fan_boost.temp = (value & 0xff0) >> 4;
				sensor->thrs_fan_boost.hysteresis = value & 0xf;
			}
			break;

		case 0x10:
			/* sensor linear correction: numerator */
			if (sensor_section == 0)
				sensor->offset_num = value;
			break;

		case 0x11:
			/* sensor linear correction: denominator */
			if (sensor_section == 0)
				sensor->offset_den = value;
			break;

		case 0x12:
			/* sensor slope: multiplier */
			if (sensor_section == 0)
				sensor->slope_mult = value;
			break;

		case 0x13:
			/* sensor slope: divisor */
			if (sensor_section == 0)
				sensor->slope_div = value;
			break;
		case 0x32:
			/* shutdown temperature threshold */
			if (thrs_section == 0) {
				sensor->thrs_shutdown.temp = (value & 0xff0) >> 4;
				sensor->thrs_shutdown.hysteresis = value & 0xf;
			}
			break;
		}
	}

	return 0;
}
153
154int
155nvbios_therm_fan_parse(struct nouveau_bios *bios,
156 struct nvbios_therm_fan *fan)
157{
158 u8 ver, len, i;
159 u16 entry;
160
161 i = 0;
162 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
163 s16 value = nv_ro16(bios, entry + 1);
164
165 switch (nv_ro08(bios, entry + 0)) {
166 case 0x22:
167 fan->min_duty = value & 0xff;
168 fan->max_duty = (value & 0xff00) >> 8;
169 break;
170 case 0x26:
171 fan->pwm_freq = value;
172 break;
173 }
174 }
175
176 return 0;
177}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
new file mode 100644
index 000000000000..b7fd1151166e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -0,0 +1,359 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/clock.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28
29#include "pll.h"
30
/* Private instance data for the nv04 clock subdev; currently just the
 * common base object. */
struct nv04_clock_priv {
	struct nouveau_clock base;
};
34
/* Return the bit shift into power-control register 0x001584 for the
 * power-gate field of the given PLL register, or -4 when that register's
 * field should not be touched on this chip revision.
 */
static int
powerctrl_1_shift(int chip_version, int reg)
{
	int shift = -4;

	/* older chips (and nv1a/nv20) never gate these bits */
	if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
		return shift;

	switch (reg) {
	case 0x680500:
		shift = 0;
		break;
	case 0x680504:
		shift = 4;
		break;
	case 0x680508:
		shift = 8;
		break;
	case 0x680520:
		shift = 12;
		break;
	default:
		break;
	}

	/*
	 * the shift for vpll regs is only used for nv3x chips with a single
	 * stage pll
	 */
	if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
			  chip_version == 0x36 || chip_version >= 0x40))
		shift = -4;

	return shift;
}
64
/* Program a single-stage PLL register (N, M and log2P in one register).
 *
 * Writes the two halves in an order that avoids driving the VCO out of
 * range mid-update (post divider first on upclock, NM pair first on
 * downclock), temporarily enabling the relevant power-gate field in
 * register 0x001584 when the chip revision has one for this PLL.
 */
static void
setPLL_single(struct nv04_clock_priv *priv, u32 reg,
	      struct nouveau_pll_vals *pv)
{
	int chip_version = nouveau_bios(priv)->version.chip;
	uint32_t oldpll = nv_rd32(priv, reg);
	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
	uint32_t saved_powerctrl_1 = 0;
	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);

	if (oldpll == pll)
		return;	/* already set */

	if (shift_powerctrl_1 >= 0) {
		saved_powerctrl_1 = nv_rd32(priv, 0x001584);
		nv_wr32(priv, 0x001584,
			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
			1 << shift_powerctrl_1);
	}

	/* compare old vs new VCO ratio to pick a safe write order */
	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
		/* upclock -- write new post divider first */
		nv_wr32(priv, reg, pv->log2P << 16 | (oldpll & 0xffff));
	else
		/* downclock -- write new NM first */
		nv_wr32(priv, reg, (oldpll & 0xffff0000) | pv->NM1);

	if (chip_version < 0x17 && chip_version != 0x11)
		/* wait a bit on older chips */
		msleep(64);
	/* posting read before completing the update */
	nv_rd32(priv, reg);

	/* then write the other half as well */
	nv_wr32(priv, reg, pll);

	/* restore the original power-gate state */
	if (shift_powerctrl_1 >= 0)
		nv_wr32(priv, 0x001584, saved_powerctrl_1);
}
104
/* Compute the new value of PRAMDAC 0x680580: set or clear the per-head
 * single-stage-PLL select bit (bit 8 for head A's vpll register 0x680508,
 * bit 28 otherwise) according to 'ss'.
 */
static uint32_t
new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
{
	uint32_t bit = (reg1 == 0x680508) ? 0x00000100 : 0x10000000;

	if (ss) /* single stage pll mode */
		return ramdac580 | bit;
	return ramdac580 & ~bit;
}
117
/* Program a two-stage PLL whose registers live in the high (0x68xxxx)
 * range: reg1 holds NM1/log2P, a companion register holds NM2, and on
 * nv41+ the RAMDAC 0x680580 select bit chooses single-stage mode.
 * Power-gate bits in 0x001584 and clock-gate bits in 0x00c040 are
 * temporarily adjusted around the update where the chip requires it.
 */
static void
setPLL_double_highregs(struct nv04_clock_priv *priv, u32 reg1,
		       struct nouveau_pll_vals *pv)
{
	int chip_version = nouveau_bios(priv)->version.chip;
	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
	/* NM2 companion register: +0x5c for 0x680520, +0x70 otherwise */
	uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
	uint32_t oldpll1 = nv_rd32(priv, reg1);
	uint32_t oldpll2 = !nv3035 ? nv_rd32(priv, reg2) : 0;
	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
	uint32_t oldramdac580 = 0, ramdac580 = 0;
	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;	/* nv41+ only */
	uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);

	/* model specific additions to generic pll1 and pll2 set up above */
	if (nv3035) {
		/* nv30/nv35 pack the second-stage dividers into pll1 */
		pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
		       (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
		pll2 = 0;
	}
	if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
		oldramdac580 = nv_rd32(priv, 0x680580);
		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
		if (oldramdac580 != ramdac580)
			oldpll1 = ~0;	/* force mismatch */
		if (single_stage)
			/* magic value used by nvidia in single stage mode */
			pll2 |= 0x011f;
	}
	if (chip_version > 0x70)
		/* magic bits set by the blob (but not the bios) on g71-73 */
		pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;

	if (oldpll1 == pll1 && oldpll2 == pll2)
		return;	/* already set */

	if (shift_powerctrl_1 >= 0) {
		saved_powerctrl_1 = nv_rd32(priv, 0x001584);
		nv_wr32(priv, 0x001584,
			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
			1 << shift_powerctrl_1);
	}

	if (chip_version >= 0x40) {
		int shift_c040 = 14;

		/* cumulative fallthrough: each matching register adds 2 */
		switch (reg1) {
		case 0x680504:
			shift_c040 += 2;
			/* fallthrough */
		case 0x680500:
			shift_c040 += 2;
			/* fallthrough */
		case 0x680520:
			shift_c040 += 2;
			/* fallthrough */
		case 0x680508:
			shift_c040 += 2;
		}

		savedc040 = nv_rd32(priv, 0xc040);
		if (shift_c040 != 14)
			nv_wr32(priv, 0xc040, savedc040 & ~(3 << shift_c040));
	}

	if (oldramdac580 != ramdac580)
		nv_wr32(priv, 0x680580, ramdac580);

	/* write NM2 before NM1 so the new ratio takes effect atomically */
	if (!nv3035)
		nv_wr32(priv, reg2, pll2);
	nv_wr32(priv, reg1, pll1);

	/* restore gating state */
	if (shift_powerctrl_1 >= 0)
		nv_wr32(priv, 0x001584, saved_powerctrl_1);
	if (chip_version >= 0x40)
		nv_wr32(priv, 0xc040, savedc040);
}
194
/* Program a two-stage PLL whose registers live in the low (0x0040xx)
 * range: NMNMreg packs both NM pairs and its companion at -4 holds the
 * post divider(s).  The memory PLL (Preg 0x4020) additionally mirrors
 * its values into 0x4038 and consults the VBIOS limits for a biased
 * second post divider.
 */
static void
setPLL_double_lowregs(struct nv04_clock_priv *priv, u32 NMNMreg,
		      struct nouveau_pll_vals *pv)
{
	/* When setting PLLs, there is a merry game of disabling and enabling
	 * various bits of hardware during the process. This function is a
	 * synthesis of six nv4x traces, nearly each card doing a subtly
	 * different thing. With luck all the necessary bits for each card are
	 * combined herein. Without luck it deviates from each card's formula
	 * so as to not work on any :)
	 */

	uint32_t Preg = NMNMreg - 4;
	bool mpll = Preg == 0x4020;
	uint32_t oldPval = nv_rd32(priv, Preg);
	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
	uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
			0xc << 28 | pv->log2P << 16;
	uint32_t saved4600 = 0;
	/* some cards have different maskc040s */
	uint32_t maskc040 = ~(3 << 14), savedc040;
	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;

	/* nothing to do if both halves already hold the target values */
	if (nv_rd32(priv, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
		return;

	if (Preg == 0x4000)
		maskc040 = ~0x333;
	if (Preg == 0x4058)
		maskc040 = ~(0xc << 24);

	if (mpll) {
		struct nvbios_pll info;
		uint8_t Pval2;

		/* second post divider is log2P plus the table's bias,
		 * clamped to the table's maximum */
		if (nvbios_pll_parse(nouveau_bios(priv), Preg, &info))
			return;

		Pval2 = pv->log2P + info.bias_p;
		if (Pval2 > info.max_p)
			Pval2 = info.max_p;
		Pval |= 1 << 28 | Pval2 << 20;

		saved4600 = nv_rd32(priv, 0x4600);
		nv_wr32(priv, 0x4600, saved4600 | 8 << 28);
	}
	if (single_stage)
		Pval |= mpll ? 1 << 12 : 1 << 8;

	/* disable the PLL, then write P with the disable bit still clear */
	nv_wr32(priv, Preg, oldPval | 1 << 28);
	nv_wr32(priv, Preg, Pval & ~(4 << 28));
	if (mpll) {
		Pval |= 8 << 20;
		nv_wr32(priv, 0x4020, Pval & ~(0xc << 28));
		nv_wr32(priv, 0x4038, Pval & ~(0xc << 28));
	}

	/* gate clocks around the NM update */
	savedc040 = nv_rd32(priv, 0xc040);
	nv_wr32(priv, 0xc040, savedc040 & maskc040);

	nv_wr32(priv, NMNMreg, NMNM);
	if (NMNMreg == 0x4024)
		nv_wr32(priv, 0x403c, NMNM);

	nv_wr32(priv, Preg, Pval);
	if (mpll) {
		Pval &= ~(8 << 20);
		nv_wr32(priv, 0x4020, Pval);
		nv_wr32(priv, 0x4038, Pval);
		nv_wr32(priv, 0x4600, saved4600);
	}

	nv_wr32(priv, 0xc040, savedc040);

	if (mpll) {
		nv_wr32(priv, 0x4020, Pval & ~(1 << 28));
		nv_wr32(priv, 0x4038, Pval & ~(1 << 28));
	}
}
274
/* Set the PLL identified by 'type' (a register address here) to 'freq'.
 *
 * Looks up VBIOS limits, computes coefficients, then programs them.
 * NOTE(review): for registers at or below 0x405c the limits lookup uses
 * reg - 4 -- presumably because low-range PLLs are indexed by their
 * P register (NMNM reg minus 4, cf. setPLL_double_lowregs); confirm.
 * NOTE(review): pll_calc returning 0 indicates calculation failure, yet
 * 0 is propagated as the return value here -- callers see success.
 */
int
nv04_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
{
	struct nv04_clock_priv *priv = (void *)clk;
	struct nouveau_pll_vals pv;
	struct nvbios_pll info;
	int ret;

	ret = nvbios_pll_parse(nouveau_bios(priv), type > 0x405c ?
			       type : type - 4, &info);
	if (ret)
		return ret;

	ret = clk->pll_calc(clk, &info, freq, &pv);
	if (!ret)
		return ret;

	return clk->pll_prog(clk, type, &pv);
}
294
295int
296nv04_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
297 int clk, struct nouveau_pll_vals *pv)
298{
299 int N1, M1, N2, M2, P;
300 int ret = nv04_pll_calc(clock, info, clk, &N1, &M1, &N2, &M2, &P);
301 if (ret) {
302 pv->refclk = info->refclk;
303 pv->N1 = N1;
304 pv->M1 = M1;
305 pv->N2 = N2;
306 pv->M2 = M2;
307 pv->log2P = P;
308 }
309 return ret;
310}
311
312int
313nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
314 struct nouveau_pll_vals *pv)
315{
316 struct nv04_clock_priv *priv = (void *)clk;
317 int cv = nouveau_bios(clk)->version.chip;
318
319 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
320 cv >= 0x40) {
321 if (reg1 > 0x405c)
322 setPLL_double_highregs(priv, reg1, pv);
323 else
324 setPLL_double_lowregs(priv, reg1, pv);
325 } else
326 setPLL_single(priv, reg1, pv);
327
328 return 0;
329}
330
/* Constructor for the nv04 clock subdev: creates the common clock object
 * and wires up the nv04 PLL set/calc/prog methods.
 */
static int
nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv04_clock_priv *priv;
	int ret;

	ret = nouveau_clock_create(parent, engine, oclass, &priv);
	/* published before the error check -- presumably so the core can
	 * tear down a partially-constructed object (standard nouveau ctor
	 * pattern); confirm against core/object semantics */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.pll_set = nv04_clock_pll_set;
	priv->base.pll_calc = nv04_clock_pll_calc;
	priv->base.pll_prog = nv04_clock_pll_prog;
	return 0;
}
349
/* nv04 clock subdev class: nv04_clock_ctor plus the generic clock
 * destroy/init/fini handlers. */
struct nouveau_oclass
nv04_clock_oclass = {
	.handle = NV_SUBDEV(CLOCK, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_clock_ctor,
		.dtor = _nouveau_clock_dtor,
		.init = _nouveau_clock_init,
		.fini = _nouveau_clock_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index c82de98fee0e..a4b2b7ebf9af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Red Hat Inc. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,34 +22,38 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#ifndef __NOUVEAU_RAMHT_H__ 25#include <subdev/clock.h>
26#define __NOUVEAU_RAMHT_H__
27 26
28struct nouveau_ramht_entry { 27struct nv40_clock_priv {
29 struct list_head head; 28 struct nouveau_clock base;
30 struct nouveau_channel *channel;
31 struct nouveau_gpuobj *gpuobj;
32 u32 handle;
33}; 29};
34 30
35struct nouveau_ramht { 31static int
36 struct drm_device *dev; 32nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
37 struct kref refcount; 33 struct nouveau_oclass *oclass, void *data, u32 size,
38 spinlock_t lock; 34 struct nouveau_object **pobject)
39 struct nouveau_gpuobj *gpuobj; 35{
40 struct list_head entries; 36 struct nv40_clock_priv *priv;
41 int bits; 37 int ret;
42};
43 38
44extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *, 39 ret = nouveau_clock_create(parent, engine, oclass, &priv);
45 struct nouveau_ramht **); 40 *pobject = nv_object(priv);
46extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **, 41 if (ret)
47 struct nouveau_channel *unref_channel); 42 return ret;
48 43
49extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle, 44 priv->base.pll_set = nv04_clock_pll_set;
50 struct nouveau_gpuobj *); 45 priv->base.pll_calc = nv04_clock_pll_calc;
51extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle); 46 priv->base.pll_prog = nv04_clock_pll_prog;
52extern struct nouveau_gpuobj * 47 return 0;
53nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); 48}
54 49
55#endif 50struct nouveau_oclass
51nv40_clock_oclass = {
52 .handle = NV_SUBDEV(CLOCK, 0x40),
53 .ofuncs = &(struct nouveau_ofuncs) {
54 .ctor = nv40_clock_ctor,
55 .dtor = _nouveau_clock_dtor,
56 .init = _nouveau_clock_init,
57 .fini = _nouveau_clock_fini,
58 },
59};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
new file mode 100644
index 000000000000..fd181fbceddb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/clock.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28
29#include "pll.h"
30
31struct nv50_clock_priv {
32 struct nouveau_clock base;
33};
34
35static int
36nv50_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
37{
38 struct nv50_clock_priv *priv = (void *)clk;
39 struct nouveau_bios *bios = nouveau_bios(priv);
40 struct nvbios_pll info;
41 int N1, M1, N2, M2, P;
42 int ret;
43
44 ret = nvbios_pll_parse(bios, type, &info);
45 if (ret) {
46 nv_error(clk, "failed to retrieve pll data, %d\n", ret);
47 return ret;
48 }
49
50 ret = nv04_pll_calc(clk, &info, freq, &N1, &M1, &N2, &M2, &P);
51 if (!ret) {
52 nv_error(clk, "failed pll calculation\n");
53 return ret;
54 }
55
56 switch (info.type) {
57 case PLL_VPLL0:
58 case PLL_VPLL1:
59 nv_wr32(priv, info.reg + 0, 0x10000611);
60 nv_mask(priv, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
61 nv_mask(priv, info.reg + 8, 0x7fff00ff, (P << 28) |
62 (M2 << 16) | N2);
63 break;
64 case PLL_MEMORY:
65 nv_mask(priv, info.reg + 0, 0x01ff0000, (P << 22) |
66 (info.bias_p << 19) |
67 (P << 16));
68 nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
69 break;
70 default:
71 nv_mask(priv, info.reg + 0, 0x00070000, (P << 16));
72 nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
73 break;
74 }
75
76 return 0;
77}
78
79static int
80nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nv50_clock_priv *priv;
85 int ret;
86
87 ret = nouveau_clock_create(parent, engine, oclass, &priv);
88 *pobject = nv_object(priv);
89 if (ret)
90 return ret;
91
92 priv->base.pll_set = nv50_clock_pll_set;
93 return 0;
94}
95
96struct nouveau_oclass
97nv50_clock_oclass = {
98 .handle = NV_SUBDEV(CLOCK, 0x50),
99 .ofuncs = &(struct nouveau_ofuncs) {
100 .ctor = nv50_clock_ctor,
101 .dtor = _nouveau_clock_dtor,
102 .init = _nouveau_clock_init,
103 .fini = _nouveau_clock_fini,
104 },
105};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
new file mode 100644
index 000000000000..cc8d7d162d7c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/clock.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28
29#include "pll.h"
30
31struct nva3_clock_priv {
32 struct nouveau_clock base;
33};
34
35static int
36nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
37{
38 struct nva3_clock_priv *priv = (void *)clk;
39 struct nouveau_bios *bios = nouveau_bios(priv);
40 struct nvbios_pll info;
41 int N, fN, M, P;
42 int ret;
43
44 ret = nvbios_pll_parse(bios, type, &info);
45 if (ret)
46 return ret;
47
48 ret = nva3_pll_calc(clk, &info, freq, &N, &fN, &M, &P);
49 if (ret < 0)
50 return ret;
51
52 switch (info.type) {
53 case PLL_VPLL0:
54 case PLL_VPLL1:
55 nv_wr32(priv, info.reg + 0, 0x50000610);
56 nv_mask(priv, info.reg + 4, 0x003fffff,
57 (P << 16) | (M << 8) | N);
58 nv_wr32(priv, info.reg + 8, fN);
59 break;
60 default:
61 nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
62 ret = -EINVAL;
63 break;
64 }
65
66 return ret;
67}
68
69static int
70nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
71 struct nouveau_oclass *oclass, void *data, u32 size,
72 struct nouveau_object **pobject)
73{
74 struct nva3_clock_priv *priv;
75 int ret;
76
77 ret = nouveau_clock_create(parent, engine, oclass, &priv);
78 *pobject = nv_object(priv);
79 if (ret)
80 return ret;
81
82 priv->base.pll_set = nva3_clock_pll_set;
83 return 0;
84}
85
86struct nouveau_oclass
87nva3_clock_oclass = {
88 .handle = NV_SUBDEV(CLOCK, 0xa3),
89 .ofuncs = &(struct nouveau_ofuncs) {
90 .ctor = nva3_clock_ctor,
91 .dtor = _nouveau_clock_dtor,
92 .init = _nouveau_clock_init,
93 .fini = _nouveau_clock_fini,
94 },
95};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
new file mode 100644
index 000000000000..5ccce0b17bf3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -0,0 +1,94 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/clock.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28
29#include "pll.h"
30
31struct nvc0_clock_priv {
32 struct nouveau_clock base;
33};
34
35static int
36nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
37{
38 struct nvc0_clock_priv *priv = (void *)clk;
39 struct nouveau_bios *bios = nouveau_bios(priv);
40 struct nvbios_pll info;
41 int N, fN, M, P;
42 int ret;
43
44 ret = nvbios_pll_parse(bios, type, &info);
45 if (ret)
46 return ret;
47
48 ret = nva3_pll_calc(clk, &info, freq, &N, &fN, &M, &P);
49 if (ret < 0)
50 return ret;
51
52 switch (info.type) {
53 case PLL_VPLL0:
54 case PLL_VPLL1:
55 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
56 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
57 nv_wr32(priv, info.reg + 0x10, fN << 16);
58 break;
59 default:
60 nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
61 ret = -EINVAL;
62 break;
63 }
64
65 return ret;
66}
67
68static int
69nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
70 struct nouveau_oclass *oclass, void *data, u32 size,
71 struct nouveau_object **pobject)
72{
73 struct nvc0_clock_priv *priv;
74 int ret;
75
76 ret = nouveau_clock_create(parent, engine, oclass, &priv);
77 *pobject = nv_object(priv);
78 if (ret)
79 return ret;
80
81 priv->base.pll_set = nvc0_clock_pll_set;
82 return 0;
83}
84
85struct nouveau_oclass
86nvc0_clock_oclass = {
87 .handle = NV_SUBDEV(CLOCK, 0xc0),
88 .ofuncs = &(struct nouveau_ofuncs) {
89 .ctor = nvc0_clock_ctor,
90 .dtor = _nouveau_clock_dtor,
91 .init = _nouveau_clock_init,
92 .fini = _nouveau_clock_fini,
93 },
94};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h b/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h
new file mode 100644
index 000000000000..ef2c0078f337
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h
@@ -0,0 +1,9 @@
1#ifndef __NOUVEAU_PLL_H__
2#define __NOUVEAU_PLL_H__
3
4int nv04_pll_calc(struct nouveau_clock *, struct nvbios_pll *, u32 freq,
5 int *N1, int *M1, int *N2, int *M2, int *P);
6int nva3_pll_calc(struct nouveau_clock *, struct nvbios_pll *, u32 freq,
7 int *N, int *fN, int *M, int *P);
8
9#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
new file mode 100644
index 000000000000..a2ab6d051ba8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
@@ -0,0 +1,242 @@
1/*
2 * Copyright 1993-2003 NVIDIA, Corporation
3 * Copyright 2007-2009 Stuart Bennett
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
19 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
20 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include <subdev/clock.h>
25#include <subdev/bios.h>
26#include <subdev/bios/pll.h>
27
28#include "pll.h"
29
30static int
31getMNP_single(struct nouveau_clock *clock, struct nvbios_pll *info, int clk,
32 int *pN, int *pM, int *pP)
33{
34 /* Find M, N and P for a single stage PLL
35 *
36 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
37 * values, but we're too lazy to use those atm
38 *
39 * "clk" parameter in kHz
40 * returns calculated clock
41 */
42 int cv = nouveau_bios(clock)->version.chip;
43 int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
44 int minM = info->vco1.min_m, maxM = info->vco1.max_m;
45 int minN = info->vco1.min_n, maxN = info->vco1.max_n;
46 int minU = info->vco1.min_inputfreq;
47 int maxU = info->vco1.max_inputfreq;
48 int minP = info->min_p;
49 int maxP = info->max_p_usable;
50 int crystal = info->refclk;
51 int M, N, thisP, P;
52 int clkP, calcclk;
53 int delta, bestdelta = INT_MAX;
54 int bestclk = 0;
55
56 /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
57 /* possibly correlated with introduction of 27MHz crystal */
58 if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
59 if (clk > 250000)
60 maxM = 6;
61 if (clk > 340000)
62 maxM = 2;
63 } else if (cv < 0x40) {
64 if (clk > 150000)
65 maxM = 6;
66 if (clk > 200000)
67 maxM = 4;
68 if (clk > 340000)
69 maxM = 2;
70 }
71
72 P = 1 << maxP;
73 if ((clk * P) < minvco) {
74 minvco = clk * maxP;
75 maxvco = minvco * 2;
76 }
77
78 if (clk + clk/200 > maxvco) /* +0.5% */
79 maxvco = clk + clk/200;
80
81 /* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
82 for (thisP = minP; thisP <= maxP; thisP++) {
83 P = 1 << thisP;
84 clkP = clk * P;
85
86 if (clkP < minvco)
87 continue;
88 if (clkP > maxvco)
89 return bestclk;
90
91 for (M = minM; M <= maxM; M++) {
92 if (crystal/M < minU)
93 return bestclk;
94 if (crystal/M > maxU)
95 continue;
96
97 /* add crystal/2 to round better */
98 N = (clkP * M + crystal/2) / crystal;
99
100 if (N < minN)
101 continue;
102 if (N > maxN)
103 break;
104
105 /* more rounding additions */
106 calcclk = ((N * crystal + P/2) / P + M/2) / M;
107 delta = abs(calcclk - clk);
108 /* we do an exhaustive search rather than terminating
109 * on an optimality condition...
110 */
111 if (delta < bestdelta) {
112 bestdelta = delta;
113 bestclk = calcclk;
114 *pN = N;
115 *pM = M;
116 *pP = thisP;
117 if (delta == 0) /* except this one */
118 return bestclk;
119 }
120 }
121 }
122
123 return bestclk;
124}
125
126static int
127getMNP_double(struct nouveau_clock *clock, struct nvbios_pll *info, int clk,
128 int *pN1, int *pM1, int *pN2, int *pM2, int *pP)
129{
130 /* Find M, N and P for a two stage PLL
131 *
132 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
133 * values, but we're too lazy to use those atm
134 *
135 * "clk" parameter in kHz
136 * returns calculated clock
137 */
138 int chip_version = nouveau_bios(clock)->version.chip;
139 int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
140 int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
141 int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
142 int maxU1 = info->vco1.max_inputfreq, maxU2 = info->vco2.max_inputfreq;
143 int minM1 = info->vco1.min_m, maxM1 = info->vco1.max_m;
144 int minN1 = info->vco1.min_n, maxN1 = info->vco1.max_n;
145 int minM2 = info->vco2.min_m, maxM2 = info->vco2.max_m;
146 int minN2 = info->vco2.min_n, maxN2 = info->vco2.max_n;
147 int maxlog2P = info->max_p_usable;
148 int crystal = info->refclk;
149 bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
150 int M1, N1, M2, N2, log2P;
151 int clkP, calcclk1, calcclk2, calcclkout;
152 int delta, bestdelta = INT_MAX;
153 int bestclk = 0;
154
155 int vco2 = (maxvco2 - maxvco2/200) / 2;
156 for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
157 ;
158 clkP = clk << log2P;
159
160 if (maxvco2 < clk + clk/200) /* +0.5% */
161 maxvco2 = clk + clk/200;
162
163 for (M1 = minM1; M1 <= maxM1; M1++) {
164 if (crystal/M1 < minU1)
165 return bestclk;
166 if (crystal/M1 > maxU1)
167 continue;
168
169 for (N1 = minN1; N1 <= maxN1; N1++) {
170 calcclk1 = crystal * N1 / M1;
171 if (calcclk1 < minvco1)
172 continue;
173 if (calcclk1 > maxvco1)
174 break;
175
176 for (M2 = minM2; M2 <= maxM2; M2++) {
177 if (calcclk1/M2 < minU2)
178 break;
179 if (calcclk1/M2 > maxU2)
180 continue;
181
182 /* add calcclk1/2 to round better */
183 N2 = (clkP * M2 + calcclk1/2) / calcclk1;
184 if (N2 < minN2)
185 continue;
186 if (N2 > maxN2)
187 break;
188
189 if (!fixedgain2) {
190 if (chip_version < 0x60)
191 if (N2/M2 < 4 || N2/M2 > 10)
192 continue;
193
194 calcclk2 = calcclk1 * N2 / M2;
195 if (calcclk2 < minvco2)
196 break;
197 if (calcclk2 > maxvco2)
198 continue;
199 } else
200 calcclk2 = calcclk1;
201
202 calcclkout = calcclk2 >> log2P;
203 delta = abs(calcclkout - clk);
204 /* we do an exhaustive search rather than terminating
205 * on an optimality condition...
206 */
207 if (delta < bestdelta) {
208 bestdelta = delta;
209 bestclk = calcclkout;
210 *pN1 = N1;
211 *pM1 = M1;
212 *pN2 = N2;
213 *pM2 = M2;
214 *pP = log2P;
215 if (delta == 0) /* except this one */
216 return bestclk;
217 }
218 }
219 }
220 }
221
222 return bestclk;
223}
224
225int
226nv04_pll_calc(struct nouveau_clock *clk, struct nvbios_pll *info, u32 freq,
227 int *N1, int *M1, int *N2, int *M2, int *P)
228{
229 int ret;
230
231 if (!info->vco2.max_freq) {
232 ret = getMNP_single(clk, info, freq, N1, M1, P);
233 *N2 = 1;
234 *M2 = 1;
235 } else {
236 ret = getMNP_double(clk, info, freq, N1, M1, N2, M2, P);
237 }
238
239 if (!ret)
240 nv_error(clk, "unable to compute acceptable pll values\n");
241 return ret;
242}
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
index 8cf63a8b30cd..eed5c16cf610 100644
--- a/drivers/gpu/drm/nouveau/nv50_calc.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
@@ -22,60 +22,43 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <subdev/clock.h>
26#include "nouveau_drv.h" 26#include <subdev/bios.h>
27#include "nouveau_hw.h" 27#include <subdev/bios/pll.h>
28 28
29int 29#include "pll.h"
30nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
31 int *N1, int *M1, int *N2, int *M2, int *P)
32{
33 struct nouveau_pll_vals pll_vals;
34 int ret;
35
36 ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
37 if (ret <= 0)
38 return ret;
39
40 *N1 = pll_vals.N1;
41 *M1 = pll_vals.M1;
42 *N2 = pll_vals.N2;
43 *M2 = pll_vals.M2;
44 *P = pll_vals.log2P;
45 return ret;
46}
47 30
48int 31int
49nva3_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk, 32nva3_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
50 int *pN, int *pfN, int *pM, int *P) 33 u32 freq, int *pN, int *pfN, int *pM, int *P)
51{ 34{
52 u32 best_err = ~0, err; 35 u32 best_err = ~0, err;
53 int M, lM, hM, N, fN; 36 int M, lM, hM, N, fN;
54 37
55 *P = pll->vco1.maxfreq / clk; 38 *P = info->vco1.max_freq / freq;
56 if (*P > pll->max_p) 39 if (*P > info->max_p)
57 *P = pll->max_p; 40 *P = info->max_p;
58 if (*P < pll->min_p) 41 if (*P < info->min_p)
59 *P = pll->min_p; 42 *P = info->min_p;
60 43
61 lM = (pll->refclk + pll->vco1.max_inputfreq) / pll->vco1.max_inputfreq; 44 lM = (info->refclk + info->vco1.max_inputfreq) / info->vco1.max_inputfreq;
62 lM = max(lM, (int)pll->vco1.min_m); 45 lM = max(lM, (int)info->vco1.min_m);
63 hM = (pll->refclk + pll->vco1.min_inputfreq) / pll->vco1.min_inputfreq; 46 hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq;
64 hM = min(hM, (int)pll->vco1.max_m); 47 hM = min(hM, (int)info->vco1.max_m);
65 48
66 for (M = lM; M <= hM; M++) { 49 for (M = lM; M <= hM; M++) {
67 u32 tmp = clk * *P * M; 50 u32 tmp = freq * *P * M;
68 N = tmp / pll->refclk; 51 N = tmp / info->refclk;
69 fN = tmp % pll->refclk; 52 fN = tmp % info->refclk;
70 if (!pfN && fN >= pll->refclk / 2) 53 if (!pfN && fN >= info->refclk / 2)
71 N++; 54 N++;
72 55
73 if (N < pll->vco1.min_n) 56 if (N < info->vco1.min_n)
74 continue; 57 continue;
75 if (N > pll->vco1.max_n) 58 if (N > info->vco1.max_n)
76 break; 59 break;
77 60
78 err = abs(clk - (pll->refclk * N / M / *P)); 61 err = abs(freq - (info->refclk * N / M / *P));
79 if (err < best_err) { 62 if (err < best_err) {
80 best_err = err; 63 best_err = err;
81 *pN = N; 64 *pN = N;
@@ -83,15 +66,15 @@ nva3_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
83 } 66 }
84 67
85 if (pfN) { 68 if (pfN) {
86 *pfN = (((fN << 13) / pll->refclk) - 4096) & 0xffff; 69 *pfN = (((fN << 13) / info->refclk) - 4096) & 0xffff;
87 return clk; 70 return freq;
88 } 71 }
89 } 72 }
90 73
91 if (unlikely(best_err == ~0)) { 74 if (unlikely(best_err == ~0)) {
92 NV_ERROR(dev, "unable to find matching pll values\n"); 75 nv_error(clock, "unable to find matching pll values\n");
93 return -EINVAL; 76 return -EINVAL;
94 } 77 }
95 78
96 return pll->refclk * *pN / *pM / *P; 79 return info->refclk * *pN / *pM / *P;
97} 80}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
new file mode 100644
index 000000000000..ca9a4648bd8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -0,0 +1,472 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/device.h>
27#include <core/client.h>
28#include <core/device.h>
29#include <core/option.h>
30
31#include <core/class.h>
32
33#include <subdev/device.h>
34
35static DEFINE_MUTEX(nv_devices_mutex);
36static LIST_HEAD(nv_devices);
37
38struct nouveau_device *
39nouveau_device_find(u64 name)
40{
41 struct nouveau_device *device, *match = NULL;
42 mutex_lock(&nv_devices_mutex);
43 list_for_each_entry(device, &nv_devices, head) {
44 if (device->handle == name) {
45 match = device;
46 break;
47 }
48 }
49 mutex_unlock(&nv_devices_mutex);
50 return match;
51}
52
53/******************************************************************************
54 * nouveau_devobj (0x0080): class implementation
55 *****************************************************************************/
56struct nouveau_devobj {
57 struct nouveau_parent base;
58 struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
59 bool created;
60};
61
62static const u64 disable_map[] = {
63 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
64 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
65 [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
66 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
67 [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
68 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
69 [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
70 [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
71 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
72 [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
73 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
74 [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
75 [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
76 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
77 [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
78 [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
79 [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
80 [NVDEV_ENGINE_VP] = NV_DEVICE_DISABLE_VP,
81 [NVDEV_ENGINE_CRYPT] = NV_DEVICE_DISABLE_CRYPT,
82 [NVDEV_ENGINE_BSP] = NV_DEVICE_DISABLE_BSP,
83 [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP,
84 [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
85 [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
86 [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1,
87 [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
88 [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
89 [NVDEV_SUBDEV_NR] = 0,
90};
91
92static int
93nouveau_devobj_ctor(struct nouveau_object *parent,
94 struct nouveau_object *engine,
95 struct nouveau_oclass *oclass, void *data, u32 size,
96 struct nouveau_object **pobject)
97{
98 struct nouveau_client *client = nv_client(parent);
99 struct nouveau_device *device;
100 struct nouveau_devobj *devobj;
101 struct nv_device_class *args = data;
102 u64 disable, boot0, strap;
103 u64 mmio_base, mmio_size;
104 void __iomem *map;
105 int ret, i, c;
106
107 if (size < sizeof(struct nv_device_class))
108 return -EINVAL;
109
110 /* find the device subdev that matches what the client requested */
111 device = nv_device(client->device);
112 if (args->device != ~0) {
113 device = nouveau_device_find(args->device);
114 if (!device)
115 return -ENODEV;
116 }
117
118 ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
119 (1ULL << NVDEV_ENGINE_DMAOBJ) |
120 (1ULL << NVDEV_ENGINE_FIFO) |
121 (1ULL << NVDEV_ENGINE_DISP), &devobj);
122 *pobject = nv_object(devobj);
123 if (ret)
124 return ret;
125
126 mmio_base = pci_resource_start(device->pdev, 0);
127 mmio_size = pci_resource_len(device->pdev, 0);
128
129 /* translate api disable mask into internal mapping */
130 disable = args->debug0;
131 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
132 if (args->disable & disable_map[i])
133 disable |= (1ULL << i);
134 }
135
136 /* identify the chipset, and determine classes of subdev/engines */
137 if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
138 !device->card_type) {
139 map = ioremap(mmio_base, 0x102000);
140 if (map == NULL)
141 return -ENOMEM;
142
143 /* switch mmio to cpu's native endianness */
144#ifndef __BIG_ENDIAN
145 if (ioread32_native(map + 0x000004) != 0x00000000)
146#else
147 if (ioread32_native(map + 0x000004) == 0x00000000)
148#endif
149 iowrite32_native(0x01000001, map + 0x000004);
150
151 /* read boot0 and strapping information */
152 boot0 = ioread32_native(map + 0x000000);
153 strap = ioread32_native(map + 0x101000);
154 iounmap(map);
155
156 /* determine chipset and derive architecture from it */
157 if ((boot0 & 0x0f000000) > 0) {
158 device->chipset = (boot0 & 0xff00000) >> 20;
159 switch (device->chipset & 0xf0) {
160 case 0x10: device->card_type = NV_10; break;
161 case 0x20: device->card_type = NV_20; break;
162 case 0x30: device->card_type = NV_30; break;
163 case 0x40:
164 case 0x60: device->card_type = NV_40; break;
165 case 0x50:
166 case 0x80:
167 case 0x90:
168 case 0xa0: device->card_type = NV_50; break;
169 case 0xc0: device->card_type = NV_C0; break;
170 case 0xd0: device->card_type = NV_D0; break;
171 case 0xe0: device->card_type = NV_E0; break;
172 default:
173 break;
174 }
175 } else
176 if ((boot0 & 0xff00fff0) == 0x20004000) {
177 if (boot0 & 0x00f00000)
178 device->chipset = 0x05;
179 else
180 device->chipset = 0x04;
181 device->card_type = NV_04;
182 }
183
184 switch (device->card_type) {
185 case NV_04: ret = nv04_identify(device); break;
186 case NV_10: ret = nv10_identify(device); break;
187 case NV_20: ret = nv20_identify(device); break;
188 case NV_30: ret = nv30_identify(device); break;
189 case NV_40: ret = nv40_identify(device); break;
190 case NV_50: ret = nv50_identify(device); break;
191 case NV_C0:
192 case NV_D0: ret = nvc0_identify(device); break;
193 case NV_E0: ret = nve0_identify(device); break;
194 default:
195 ret = -EINVAL;
196 break;
197 }
198
199 if (ret) {
200 nv_error(device, "unknown chipset, 0x%08x\n", boot0);
201 return ret;
202 }
203
204 nv_info(device, "BOOT0 : 0x%08x\n", boot0);
205 nv_info(device, "Chipset: %s (NV%02X)\n",
206 device->cname, device->chipset);
207 nv_info(device, "Family : NV%02X\n", device->card_type);
208
209 /* determine frequency of timing crystal */
210 if ( device->chipset < 0x17 ||
211 (device->chipset >= 0x20 && device->chipset <= 0x25))
212 strap &= 0x00000040;
213 else
214 strap &= 0x00400040;
215
216 switch (strap) {
217 case 0x00000000: device->crystal = 13500; break;
218 case 0x00000040: device->crystal = 14318; break;
219 case 0x00400000: device->crystal = 27000; break;
220 case 0x00400040: device->crystal = 25000; break;
221 }
222
223 nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
224 }
225
226 if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
227 !nv_subdev(device)->mmio) {
228 nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
229 if (!nv_subdev(device)->mmio) {
230 nv_error(device, "unable to map device registers\n");
231 return -ENOMEM;
232 }
233 }
234
235 /* ensure requested subsystems are available for use */
236 for (i = 0, c = 0; i < NVDEV_SUBDEV_NR; i++) {
237 if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
238 continue;
239
240 if (!device->subdev[i]) {
241 ret = nouveau_object_ctor(nv_object(device), NULL,
242 oclass, NULL, i,
243 &devobj->subdev[i]);
244 if (ret == -ENODEV)
245 continue;
246 if (ret)
247 return ret;
248
249 if (nv_iclass(devobj->subdev[i], NV_ENGINE_CLASS))
250 nouveau_subdev_reset(devobj->subdev[i]);
251 } else {
252 nouveau_object_ref(device->subdev[i],
253 &devobj->subdev[i]);
254 }
255
256 /* note: can't init *any* subdevs until devinit has been run
257 * due to not knowing exactly what the vbios init tables will
258 * mess with. devinit also can't be run until all of its
259 * dependencies have been created.
260 *
261 * this code delays init of any subdev until all of devinit's
262 * dependencies have been created, and then initialises each
263 * subdev in turn as they're created.
264 */
265 while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
266 struct nouveau_object *subdev = devobj->subdev[c++];
267 if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
268 ret = nouveau_object_inc(subdev);
269 if (ret)
270 return ret;
271 }
272 }
273 }
274
275 return 0;
276}
277
278static void
279nouveau_devobj_dtor(struct nouveau_object *object)
280{
281 struct nouveau_devobj *devobj = (void *)object;
282 int i;
283
284 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
285 nouveau_object_ref(NULL, &devobj->subdev[i]);
286
287 nouveau_parent_destroy(&devobj->base);
288}
289
290static int
291nouveau_devobj_init(struct nouveau_object *object)
292{
293 struct nouveau_devobj *devobj = (void *)object;
294 struct nouveau_object *subdev;
295 int ret, i;
296
297 ret = nouveau_parent_init(&devobj->base);
298 if (ret)
299 return ret;
300
301 for (i = 0; devobj->created && i < NVDEV_SUBDEV_NR; i++) {
302 if ((subdev = devobj->subdev[i])) {
303 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
304 ret = nouveau_object_inc(subdev);
305 if (ret)
306 goto fail;
307 }
308 }
309 }
310
311 devobj->created = true;
312 return 0;
313
314fail:
315 for (--i; i >= 0; i--) {
316 if ((subdev = devobj->subdev[i])) {
317 if (!nv_iclass(subdev, NV_ENGINE_CLASS))
318 nouveau_object_dec(subdev, false);
319 }
320 }
321
322 return ret;
323}
324
325static int
326nouveau_devobj_fini(struct nouveau_object *object, bool suspend)
327{
328 struct nouveau_devobj *devobj = (void *)object;
329 struct nouveau_object *subdev;
330 int ret, i;
331
332 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
333 if ((subdev = devobj->subdev[i])) {
334 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
335 ret = nouveau_object_dec(subdev, suspend);
336 if (ret && suspend)
337 goto fail;
338 }
339 }
340 }
341
342 ret = nouveau_parent_fini(&devobj->base, suspend);
343fail:
344 for (; ret && suspend && i < NVDEV_SUBDEV_NR; i++) {
345 if ((subdev = devobj->subdev[i])) {
346 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
347 ret = nouveau_object_inc(subdev);
348 if (ret) {
349 /* XXX */
350 }
351 }
352 }
353 }
354
355 return ret;
356}
357
358static u8
359nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
360{
361 return nv_rd08(object->engine, addr);
362}
363
364static u16
365nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
366{
367 return nv_rd16(object->engine, addr);
368}
369
370static u32
371nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
372{
373 return nv_rd32(object->engine, addr);
374}
375
376static void
377nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
378{
379 nv_wr08(object->engine, addr, data);
380}
381
382static void
383nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
384{
385 nv_wr16(object->engine, addr, data);
386}
387
388static void
389nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
390{
391 nv_wr32(object->engine, addr, data);
392}
393
394static struct nouveau_ofuncs
395nouveau_devobj_ofuncs = {
396 .ctor = nouveau_devobj_ctor,
397 .dtor = nouveau_devobj_dtor,
398 .init = nouveau_devobj_init,
399 .fini = nouveau_devobj_fini,
400 .rd08 = nouveau_devobj_rd08,
401 .rd16 = nouveau_devobj_rd16,
402 .rd32 = nouveau_devobj_rd32,
403 .wr08 = nouveau_devobj_wr08,
404 .wr16 = nouveau_devobj_wr16,
405 .wr32 = nouveau_devobj_wr32,
406};
407
408/******************************************************************************
409 * nouveau_device: engine functions
410 *****************************************************************************/
411struct nouveau_oclass
412nouveau_device_sclass[] = {
413 { 0x0080, &nouveau_devobj_ofuncs },
414 {}
415};
416
417static void
418nouveau_device_dtor(struct nouveau_object *object)
419{
420 struct nouveau_device *device = (void *)object;
421
422 mutex_lock(&nv_devices_mutex);
423 list_del(&device->head);
424 mutex_unlock(&nv_devices_mutex);
425
426 if (device->base.mmio)
427 iounmap(device->base.mmio);
428
429 nouveau_subdev_destroy(&device->base);
430}
431
432static struct nouveau_oclass
433nouveau_device_oclass = {
434 .handle = NV_SUBDEV(DEVICE, 0x00),
435 .ofuncs = &(struct nouveau_ofuncs) {
436 .dtor = nouveau_device_dtor,
437 },
438};
439
440int
441nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
442 const char *cfg, const char *dbg,
443 int length, void **pobject)
444{
445 struct nouveau_device *device;
446 int ret = -EEXIST;
447
448 mutex_lock(&nv_devices_mutex);
449 list_for_each_entry(device, &nv_devices, head) {
450 if (device->handle == name)
451 goto done;
452 }
453
454 ret = nouveau_subdev_create_(NULL, NULL, &nouveau_device_oclass, 0,
455 "DEVICE", "device", length, pobject);
456 device = *pobject;
457 if (ret)
458 goto done;
459
460 atomic_set(&nv_object(device)->usecount, 2);
461 device->pdev = pdev;
462 device->handle = name;
463 device->cfgopt = cfg;
464 device->dbgopt = dbg;
465 device->name = sname;
466
467 nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
468 list_add(&device->head, &nv_devices);
469done:
470 mutex_unlock(&nv_devices_mutex);
471 return ret;
472}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
new file mode 100644
index 000000000000..8626d0d6cbbc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/i2c.h>
28#include <subdev/clock.h>
29#include <subdev/devinit.h>
30#include <subdev/mc.h>
31#include <subdev/timer.h>
32#include <subdev/fb.h>
33#include <subdev/instmem.h>
34#include <subdev/vm.h>
35
36#include <engine/dmaobj.h>
37#include <engine/fifo.h>
38#include <engine/software.h>
39#include <engine/graph.h>
40#include <engine/disp.h>
41
42int
43nv04_identify(struct nouveau_device *device)
44{
45 switch (device->chipset) {
46 case 0x04:
47 device->cname = "NV04";
48 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
49 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
50 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
51 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
52 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
53 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
54 device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
55 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
56 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
57 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
58 device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass;
59 device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass;
60 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
61 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
62 break;
63 case 0x05:
64 device->cname = "NV05";
65 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
66 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
67 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
68 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
69 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
70 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
71 device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
72 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
73 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
74 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
75 device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass;
76 device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass;
77 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
78 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
79 break;
80 default:
81 nv_fatal(device, "unknown RIVA chipset\n");
82 return -EINVAL;
83 }
84
85 return 0;
86}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
new file mode 100644
index 000000000000..f09accfd0e31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -0,0 +1,195 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/devinit.h>
31#include <subdev/mc.h>
32#include <subdev/timer.h>
33#include <subdev/fb.h>
34#include <subdev/instmem.h>
35#include <subdev/vm.h>
36
37#include <engine/dmaobj.h>
38#include <engine/fifo.h>
39#include <engine/software.h>
40#include <engine/graph.h>
41#include <engine/disp.h>
42
43int
44nv10_identify(struct nouveau_device *device)
45{
46 switch (device->chipset) {
47 case 0x10:
48 device->cname = "NV10";
49 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
50 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
51 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
52 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
53 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
54 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
55 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
56 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
57 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
58 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
59 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
60 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
61 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
62 break;
63 case 0x15:
64 device->cname = "NV15";
65 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
66 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
67 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
68 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
69 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
70 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
72 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
73 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
74 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
75 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
76 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
77 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
78 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
79 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
80 break;
81 case 0x16:
82 device->cname = "NV16";
83 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
84 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
85 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
86 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
87 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
88 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
89 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
90 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
91 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
92 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
93 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
94 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
95 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
96 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
97 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
98 break;
99 case 0x1a:
100 device->cname = "nForce";
101 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
102 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
103 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
104 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
105 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
106 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
107 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
108 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
109 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
110 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
111 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
112 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
113 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
114 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
115 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
116 break;
117 case 0x11:
118 device->cname = "NV11";
119 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
120 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
121 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
122 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
123 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
124 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
125 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
126 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
127 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
128 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
129 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
130 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
131 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
132 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
133 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
134 break;
135 case 0x17:
136 device->cname = "NV17";
137 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
138 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
139 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
140 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
141 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
142 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
143 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
144 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
145 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
146 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
147 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
148 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
149 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
150 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
151 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
152 break;
153 case 0x1f:
154 device->cname = "nForce2";
155 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
156 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
157 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
158 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
159 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
160 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
161 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
162 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
163 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
164 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
165 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
166 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
167 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
168 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
169 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
170 break;
171 case 0x18:
172 device->cname = "NV18";
173 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
174 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
175 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
176 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
177 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
178 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
179 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
180 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
181 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
182 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
183 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
184 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
185 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
186 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
187 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
188 break;
189 default:
190 nv_fatal(device, "unknown Celsius chipset\n");
191 return -EINVAL;
192 }
193
194 return 0;
195}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
new file mode 100644
index 000000000000..5fa58b7369b5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/devinit.h>
32#include <subdev/mc.h>
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35#include <subdev/instmem.h>
36#include <subdev/vm.h>
37
38#include <engine/dmaobj.h>
39#include <engine/fifo.h>
40#include <engine/software.h>
41#include <engine/graph.h>
42#include <engine/disp.h>
43
44int
45nv20_identify(struct nouveau_device *device)
46{
47 switch (device->chipset) {
48 case 0x20:
49 device->cname = "NV20";
50 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
51 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
52 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
53 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
54 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
55 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
57 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
58 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
59 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
60 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
61 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
62 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
63 device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
64 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
65 break;
66 case 0x25:
67 device->cname = "NV25";
68 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
69 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
70 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
71 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
79 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
80 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
81 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
83 break;
84 case 0x28:
85 device->cname = "NV28";
86 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
87 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
88 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
89 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
90 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
91 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
92 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
93 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
94 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
95 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
96 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
97 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
98 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
99 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
100 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
101 break;
102 case 0x2a:
103 device->cname = "NV2A";
104 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
105 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
106 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
107 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
108 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
109 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
110 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
111 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
112 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
113 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
115 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
116 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
117 device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
118 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
119 break;
120 default:
121 nv_fatal(device, "unknown Kelvin chipset\n");
122 return -EINVAL;
123 }
124
125 return 0;
126}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
new file mode 100644
index 000000000000..7f4b8fe6cccc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -0,0 +1,147 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/devinit.h>
31#include <subdev/mc.h>
32#include <subdev/timer.h>
33#include <subdev/fb.h>
34#include <subdev/instmem.h>
35#include <subdev/vm.h>
36
37#include <engine/dmaobj.h>
38#include <engine/fifo.h>
39#include <engine/software.h>
40#include <engine/graph.h>
41#include <engine/mpeg.h>
42#include <engine/disp.h>
43
44int
45nv30_identify(struct nouveau_device *device)
46{
47 switch (device->chipset) {
48 case 0x30:
49 device->cname = "NV30";
50 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
51 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
52 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
53 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
54 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
55 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
57 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
58 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
59 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
60 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
61 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
62 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
63 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
64 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
65 break;
66 case 0x35:
67 device->cname = "NV35";
68 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
69 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
70 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
71 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
79 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
80 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
81 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
83 break;
84 case 0x31:
85 device->cname = "NV31";
86 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
87 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
88 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
89 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
90 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
91 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
92 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
93 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
94 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
95 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
96 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
97 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
98 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
99 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
100 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
101 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
102 break;
103 case 0x36:
104 device->cname = "NV36";
105 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
106 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
107 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
108 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
109 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
110 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
111 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
112 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
113 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
114 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
115 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
116 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
117 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
118 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
119 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
120 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
121 break;
122 case 0x34:
123 device->cname = "NV34";
124 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
125 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
126 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
127 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
128 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
129 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
130 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
131 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
132 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
133 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
134 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
135 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
136 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
137 device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
138 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
139 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
140 break;
141 default:
142 nv_fatal(device, "unknown Rankine chipset\n");
143 return -EINVAL;
144 }
145
146 return 0;
147}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
new file mode 100644
index 000000000000..42deadca0f0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -0,0 +1,375 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/devinit.h>
32#include <subdev/mc.h>
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35#include <subdev/instmem.h>
36#include <subdev/vm.h>
37
38#include <engine/dmaobj.h>
39#include <engine/fifo.h>
40#include <engine/software.h>
41#include <engine/graph.h>
42#include <engine/mpeg.h>
43#include <engine/disp.h>
44
45int
46nv40_identify(struct nouveau_device *device)
47{
48 switch (device->chipset) {
49 case 0x40:
50 device->cname = "NV40";
51 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
52 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
53 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
54 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
55 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
56 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
57 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
59 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
63 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
66 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
67 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
68 break;
69 case 0x41:
70 device->cname = "NV41";
71 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
72 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
73 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
74 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
75 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
76 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
77 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
78 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
79 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
80 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
81 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
82 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
83 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
84 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
85 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
86 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
87 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
88 break;
89 case 0x42:
90 device->cname = "NV42";
91 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
92 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
93 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
94 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
95 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
96 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
97 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
98 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
99 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
100 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
101 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
102 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
103 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
104 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
105 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
106 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
107 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
108 break;
109 case 0x43:
110 device->cname = "NV43";
111 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
112 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
113 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
114 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
115 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
116 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
117 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
118 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
119 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
120 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
121 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
122 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
123 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
124 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
125 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
126 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
127 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
128 break;
129 case 0x45:
130 device->cname = "NV45";
131 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
132 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
133 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
134 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
135 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
136 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
137 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
138 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
139 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
140 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
141 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
142 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
143 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
144 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
145 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
146 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
147 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
148 break;
149 case 0x47:
150 device->cname = "G70";
151 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
152 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
153 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
154 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
155 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
156 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
157 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
158 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
159 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
160 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
161 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
162 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
163 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
164 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
165 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
166 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
167 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
168 break;
169 case 0x49:
170 device->cname = "G71";
171 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
172 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
173 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
174 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
175 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
176 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
177 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
178 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
179 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
180 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
181 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
182 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
183 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
184 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
185 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
186 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
187 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
188 break;
189 case 0x4b:
190 device->cname = "G73";
191 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
192 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
193 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
194 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
195 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
196 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
197 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
199 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
200 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
201 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
202 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
203 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
204 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
205 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
206 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
207 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
208 break;
209 case 0x44:
210 device->cname = "NV44";
211 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
212 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
213 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
214 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
215 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
216 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
217 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
218 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
219 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
220 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
221 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
222 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
223 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
224 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
225 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
226 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
227 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
228 break;
229 case 0x46:
230 device->cname = "G72";
231 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
232 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
233 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
234 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
235 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
236 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
237 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
238 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
239 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
240 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
241 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
242 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
243 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
244 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
245 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
246 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
247 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
248 break;
249 case 0x4a:
250 device->cname = "NV44A";
251 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
252 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
253 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
254 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
255 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
256 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
257 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
258 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
259 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
260 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
261 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
262 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
263 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
264 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
265 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
266 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
267 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
268 break;
269 case 0x4c:
270 device->cname = "C61";
271 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
272 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
273 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
274 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
275 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
276 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
277 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
278 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
279 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
280 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
281 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
282 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
283 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
284 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
285 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
286 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
287 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
288 break;
289 case 0x4e:
290 device->cname = "C51";
291 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
292 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
293 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
294 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
295 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
298 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
299 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
300 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
301 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
302 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
303 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
304 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
305 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
306 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
307 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
308 break;
309 case 0x63:
310 device->cname = "C73";
311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
313 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
314 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
316 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
317 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
318 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
319 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
320 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
321 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
322 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
323 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
324 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
325 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
326 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
327 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
328 break;
329 case 0x67:
330 device->cname = "C67";
331 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
332 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
333 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
334 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
335 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
336 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
337 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
338 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
339 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
340 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
341 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
342 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
343 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
344 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
345 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
346 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
347 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
348 break;
349 case 0x68:
350 device->cname = "C68";
351 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
352 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
353 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
354 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
355 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
356 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
357 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
358 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
359 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
360 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
361 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
362 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
363 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
364 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
365 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
366 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
367 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
368 break;
369 default:
370 nv_fatal(device, "unknown Curie chipset\n");
371 return -EINVAL;
372 }
373
374 return 0;
375}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
new file mode 100644
index 000000000000..fec3bcc9a6fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -0,0 +1,410 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/mxm.h>
32#include <subdev/devinit.h>
33#include <subdev/mc.h>
34#include <subdev/timer.h>
35#include <subdev/fb.h>
36#include <subdev/instmem.h>
37#include <subdev/vm.h>
38#include <subdev/bar.h>
39
40#include <engine/dmaobj.h>
41#include <engine/fifo.h>
42#include <engine/software.h>
43#include <engine/graph.h>
44#include <engine/mpeg.h>
45#include <engine/vp.h>
46#include <engine/crypt.h>
47#include <engine/bsp.h>
48#include <engine/ppp.h>
49#include <engine/copy.h>
50#include <engine/disp.h>
51
52int
53nv50_identify(struct nouveau_device *device)
54{
55 switch (device->chipset) {
56 case 0x50:
57 device->cname = "G80";
58 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
59 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
60 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
61 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
62 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
63 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
64 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
65 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
66 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
67 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
68 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
69 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
70 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
71 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
72 device->oclass[NVDEV_ENGINE_FIFO ] = &nv50_fifo_oclass;
73 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
74 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
75 device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
76 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
77 break;
78 case 0x84:
79 device->cname = "G84";
80 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
81 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
82 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
83 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
84 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
85 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
86 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
87 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
88 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
89 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
90 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
91 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
92 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
93 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
94 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
95 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
96 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
97 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
98 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
99 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
100 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
101 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
102 break;
103 case 0x86:
104 device->cname = "G86";
105 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
106 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
107 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
108 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
109 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
110 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
111 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
112 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
113 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
114 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
115 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
116 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
117 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
118 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
119 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
120 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
121 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
122 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
123 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
124 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
125 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
126 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
127 break;
128 case 0x92:
129 device->cname = "G92";
130 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
131 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
132 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
133 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
134 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
135 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
136 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
137 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
138 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
139 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
140 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
141 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
142 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
143 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
144 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
145 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
146 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
147 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
148 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
149 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
150 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
151 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
152 break;
153 case 0x94:
154 device->cname = "G94";
155 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
156 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
157 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
158 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
159 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
160 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
161 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
162 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
163 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
164 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
165 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
166 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
167 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
168 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
169 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
170 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
171 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
172 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
173 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
174 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
175 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
176 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
177 break;
178 case 0x96:
179 device->cname = "G96";
180 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
181 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
182 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
183 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
184 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
185 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
186 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
187 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
188 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
189 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
190 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
191 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
192 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
193 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
194 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
195 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
196 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
197 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
198 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
199 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
200 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
201 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
202 break;
203 case 0x98:
204 device->cname = "G98";
205 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
206 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
207 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
208 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
209 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
210 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
211 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
212 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
213 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
214 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
215 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
216 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
217 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
218 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
219 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
220 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
221 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
222 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
223 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
224 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
225 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
226 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
227 break;
228 case 0xa0:
229 device->cname = "G200";
230 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
231 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
232 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
233 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
234 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
235 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
236 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
237 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
238 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
239 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
240 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
241 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
242 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
243 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
244 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
245 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
246 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
247 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
248 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
249 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
250 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
251 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
252 break;
253 case 0xaa:
254 device->cname = "MCP77/MCP78";
255 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
256 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
257 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
258 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
259 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
260 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
261 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
262 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
263 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
264 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
265 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
266 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
267 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
268 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
269 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
270 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
271 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
272 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
273 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
274 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
275 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
276 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
277 break;
278 case 0xac:
279 device->cname = "MCP79/MCP7A";
280 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
281 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
282 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
283 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
284 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
285 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
286 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
287 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
288 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
289 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
290 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
291 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
292 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
293 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
294 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
295 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
296 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
297 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
298 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
299 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
300 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
301 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
302 break;
303 case 0xa3:
304 device->cname = "GT215";
305 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
306 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
307 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
308 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
309 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
310 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
311 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
312 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
313 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
314 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
315 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
316 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
317 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
318 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
319 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
320 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
321 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
322 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
323 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
324 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
325 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
326 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
327 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
328 break;
329 case 0xa5:
330 device->cname = "GT216";
331 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
332 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
333 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
334 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
335 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
336 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
337 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
338 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
339 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
340 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
341 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
342 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
343 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
344 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
345 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
346 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
347 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
348 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
349 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
350 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
351 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
352 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
353 break;
354 case 0xa8:
355 device->cname = "GT218";
356 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
357 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
358 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
359 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
360 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
361 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
362 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
363 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
364 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
365 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
366 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
367 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
368 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
369 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
370 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
371 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
372 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
373 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
374 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
375 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
376 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
377 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
378 break;
379 case 0xaf:
380 device->cname = "MCP89";
381 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
382 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
383 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
384 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
385 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
386 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
387 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
388 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
389 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
390 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
391 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
392 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
393 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
394 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
395 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
396 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
397 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
398 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
399 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
400 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
401 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
402 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
403 break;
404 default:
405 nv_fatal(device, "unknown Tesla chipset\n");
406 return -EINVAL;
407 }
408
409 return 0;
410}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
new file mode 100644
index 000000000000..6697f0f9c293
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -0,0 +1,285 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/mxm.h>
32#include <subdev/devinit.h>
33#include <subdev/mc.h>
34#include <subdev/timer.h>
35#include <subdev/fb.h>
36#include <subdev/ltcg.h>
37#include <subdev/ibus.h>
38#include <subdev/instmem.h>
39#include <subdev/vm.h>
40#include <subdev/bar.h>
41
42#include <engine/dmaobj.h>
43#include <engine/fifo.h>
44#include <engine/software.h>
45#include <engine/graph.h>
46#include <engine/vp.h>
47#include <engine/bsp.h>
48#include <engine/ppp.h>
49#include <engine/copy.h>
50#include <engine/disp.h>
51
52int
53nvc0_identify(struct nouveau_device *device)
54{
55 switch (device->chipset) {
56 case 0xc0:
57 device->cname = "GF100";
58 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
59 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
60 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
61 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
62 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
63 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
64 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
65 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
66 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
67 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
68 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
69 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
70 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
71 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
72 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
73 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
74 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
75 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
76 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
77 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
78 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
79 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
80 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
81 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
83 break;
84 case 0xc4:
85 device->cname = "GF104";
86 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
87 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
88 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
89 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
90 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
91 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
92 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
93 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
94 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
95 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
96 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
97 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
100 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
101 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
102 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
103 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
104 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
105 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
106 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
107 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
108 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
109 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
110 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
111 break;
112 case 0xc3:
113 device->cname = "GF106";
114 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
115 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
116 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
117 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
118 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
119 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
120 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
121 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
122 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
123 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
124 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
125 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
126 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
127 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
128 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
129 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
130 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
131 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
132 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
133 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
134 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
135 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
136 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
137 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
138 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
139 break;
140 case 0xce:
141 device->cname = "GF114";
142 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
143 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
144 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
145 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
146 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
147 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
148 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
149 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
150 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
151 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
152 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
153 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
154 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
155 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
156 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
157 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
158 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
159 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
160 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
161 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
162 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
163 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
164 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
165 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
166 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
167 break;
168 case 0xcf:
169 device->cname = "GF116";
170 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
171 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
172 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
173 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
174 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
175 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
176 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
177 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
178 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
179 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
180 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
181 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
182 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
183 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
184 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
185 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
186 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
187 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
188 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
189 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
190 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
191 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
192 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
193 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
194 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
195 break;
196 case 0xc1:
197 device->cname = "GF108";
198 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
199 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
200 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
201 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
202 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
203 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
204 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
205 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
206 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
207 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
208 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
209 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
210 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
211 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
212 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
213 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
214 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
215 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
216 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
217 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
218 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
219 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
220 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
221 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
222 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
223 break;
224 case 0xc8:
225 device->cname = "GF110";
226 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
227 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
228 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
229 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
230 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
231 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
232 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
233 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
234 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
235 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
236 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
237 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
238 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
239 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
240 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
241 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
242 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
243 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
244 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
245 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
246 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
247 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
248 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
249 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
250 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
251 break;
252 case 0xd9:
253 device->cname = "GF119";
254 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
255 device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
256 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
257 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
258 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
259 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
260 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
261 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
262 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
263 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
264 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
265 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
266 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
267 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
268 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
269 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
270 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
271 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
272 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
273 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
274 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
275 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
276 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
277 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
278 break;
279 default:
280 nv_fatal(device, "unknown Fermi chipset\n");
281 return -EINVAL;
282 }
283
284 return 0;
285}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
new file mode 100644
index 000000000000..4a280b7ab853
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/mxm.h>
32#include <subdev/devinit.h>
33#include <subdev/mc.h>
34#include <subdev/timer.h>
35#include <subdev/fb.h>
36#include <subdev/ltcg.h>
37#include <subdev/ibus.h>
38#include <subdev/instmem.h>
39#include <subdev/vm.h>
40#include <subdev/bar.h>
41
42#include <engine/dmaobj.h>
43#include <engine/fifo.h>
44#include <engine/software.h>
45#include <engine/graph.h>
46#include <engine/disp.h>
47#include <engine/copy.h>
48
49int
50nve0_identify(struct nouveau_device *device)
51{
52 switch (device->chipset) {
53 case 0xe4:
54 device->cname = "GK104";
55 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
56 device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
57 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
58 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
59 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
60 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
61 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
62 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
63 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
64 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
65 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
66 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
67 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
68 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
69 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
70 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
71 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
72 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
73 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
74 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
75 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
76 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
77 break;
78 case 0xe7:
79 device->cname = "GK107";
80 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
81 device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
82 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
83 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
84 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
85 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
86 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
87 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
88 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
89 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
90 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
91 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
92 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
93 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
94 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
95 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
96 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
97 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
98 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
99 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
100 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
101 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
102 break;
103 default:
104 nv_fatal(device, "unknown Kepler chipset\n");
105 return -EINVAL;
106 }
107
108 return 0;
109}
diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index a987dd6e0036..5a07a39c1735 100644
--- a/drivers/gpu/drm/nouveau/nv98_ppp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2011 Red Hat Inc. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,57 +22,48 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/option.h>
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30 26
31struct nv98_ppp_engine { 27#include <subdev/devinit.h>
32 struct nouveau_exec_engine base; 28#include <subdev/bios.h>
33}; 29#include <subdev/bios/init.h>
34 30
35static int 31int
36nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend) 32nouveau_devinit_init(struct nouveau_devinit *devinit)
37{ 33{
38 if (!(nv_rd32(dev, 0x000200) & 0x00000002)) 34 int ret = nouveau_subdev_init(&devinit->base);
39 return 0; 35 if (ret)
36 return ret;
40 37
41 nv_mask(dev, 0x000200, 0x00000002, 0x00000000); 38 return nvbios_init(&devinit->base, devinit->post);
42 return 0;
43} 39}
44 40
45static int 41int
46nv98_ppp_init(struct drm_device *dev, int engine) 42nouveau_devinit_fini(struct nouveau_devinit *devinit, bool suspend)
47{
48 nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
49 nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
50 return 0;
51}
52
53static void
54nv98_ppp_destroy(struct drm_device *dev, int engine)
55{ 43{
56 struct nv98_ppp_engine *pppp = nv_engine(dev, engine); 44 /* force full reinit on resume */
45 if (suspend)
46 devinit->post = true;
57 47
58 NVOBJ_ENGINE_DEL(dev, PPP); 48 return nouveau_subdev_fini(&devinit->base, suspend);
59
60 kfree(pppp);
61} 49}
62 50
63int 51int
64nv98_ppp_create(struct drm_device *dev) 52nouveau_devinit_create_(struct nouveau_object *parent,
53 struct nouveau_object *engine,
54 struct nouveau_oclass *oclass,
55 int size, void **pobject)
65{ 56{
66 struct nv98_ppp_engine *pppp; 57 struct nouveau_device *device = nv_device(parent);
67 58 struct nouveau_devinit *devinit;
68 pppp = kzalloc(sizeof(*pppp), GFP_KERNEL); 59 int ret;
69 if (!pppp)
70 return -ENOMEM;
71 60
72 pppp->base.destroy = nv98_ppp_destroy; 61 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
73 pppp->base.init = nv98_ppp_init; 62 "init", size, pobject);
74 pppp->base.fini = nv98_ppp_fini; 63 devinit = *pobject;
64 if (ret)
65 return ret;
75 66
76 NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base); 67 devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
77 return 0; 68 return 0;
78} 69}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
new file mode 100644
index 000000000000..6b56a0f4cb40
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#define NV04_PFB_BOOT_0 0x00100000
28# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
29# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000
30# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001
31# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002
32# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003
33# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004
34# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028
35# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000
36# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008
37# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010
38# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018
39# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020
40# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028
41# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100
42# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000
43#define NV04_PFB_DEBUG_0 0x00100080
44# define NV04_PFB_DEBUG_0_PAGE_MODE 0x00000001
45# define NV04_PFB_DEBUG_0_REFRESH_OFF 0x00000010
46# define NV04_PFB_DEBUG_0_REFRESH_COUNTX64 0x00003f00
47# define NV04_PFB_DEBUG_0_REFRESH_SLOW_CLK 0x00004000
48# define NV04_PFB_DEBUG_0_SAFE_MODE 0x00008000
49# define NV04_PFB_DEBUG_0_ALOM_ENABLE 0x00010000
50# define NV04_PFB_DEBUG_0_CASOE 0x00100000
51# define NV04_PFB_DEBUG_0_CKE_INVERT 0x10000000
52# define NV04_PFB_DEBUG_0_REFINC 0x20000000
53# define NV04_PFB_DEBUG_0_SAVE_POWER_OFF 0x40000000
54#define NV04_PFB_CFG0 0x00100200
55# define NV04_PFB_CFG0_SCRAMBLE 0x20000000
56#define NV04_PFB_CFG1 0x00100204
57#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i))
58
59#define NV10_PFB_REFCTRL 0x00100210
60# define NV10_PFB_REFCTRL_VALID_1 (1 << 31)
61
/* Map BAR1 (the framebuffer aperture) write-combined.
 * Returns NULL on failure; release with fbmem_fini().
 */
static inline struct io_mapping *
fbmem_init(struct pci_dev *pdev)
{
	return io_mapping_create_wc(pci_resource_start(pdev, 1),
				    pci_resource_len(pdev, 1));
}
68
/* Tear down a framebuffer aperture mapping created by fbmem_init(). */
static inline void
fbmem_fini(struct io_mapping *fb)
{
	io_mapping_free(fb);
}
74
75static inline u32
76fbmem_peek(struct io_mapping *fb, u32 off)
77{
78 u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
79 u32 val = ioread32(p + (off & ~PAGE_MASK));
80 io_mapping_unmap_atomic(p);
81 return val;
82}
83
84static inline void
85fbmem_poke(struct io_mapping *fb, u32 off, u32 val)
86{
87 u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
88 iowrite32(val, p + (off & ~PAGE_MASK));
89 wmb();
90 io_mapping_unmap_atomic(p);
91}
92
93static inline bool
94fbmem_readback(struct io_mapping *fb, u32 off, u32 val)
95{
96 fbmem_poke(fb, off, val);
97 return val == fbmem_peek(fb, off);
98}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
new file mode 100644
index 000000000000..7a72d9394340
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -0,0 +1,189 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/devinit.h>
28#include <subdev/vga.h>
29
30#include "fbmem.h"
31
struct nv04_devinit_priv {
	struct nouveau_devinit base;
	int owner;	/* VGA owner register value saved at first fini;
			 * -1 means "not saved yet" (set by ctor) */
};
36
/* NV04 memory sizing: program the largest supported RAM configuration,
 * then narrow down the actual type/width/amount by writing patterns
 * through the framebuffer aperture and checking what reads back.
 * The register write/readback ordering here is hardware-mandated;
 * do not reorder.
 */
static void
nv04_devinit_meminit(struct nouveau_devinit *devinit)
{
	struct nv04_devinit_priv *priv = (void *)devinit;
	u32 patt = 0xdeadbeef;
	struct io_mapping *fb;
	int i;

	/* Map the framebuffer aperture */
	fb = fbmem_init(nv_device(priv)->pdev);
	if (!fb) {
		nv_error(priv, "failed to map fb\n");
		return;
	}

	/* Sequencer and refresh off */
	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
	nv_mask(priv, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);

	/* start from the maximal configuration, then probe downwards */
	nv_mask(priv, NV04_PFB_BOOT_0, ~0,
		      NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
		      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
		      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);

	for (i = 0; i < 4; i++)
		fbmem_poke(fb, 4 * i, patt);

	fbmem_poke(fb, 0x400000, patt + 1);

	/* 0x400000 aliasing back to offset 0 presumably indicates SDRAM
	 * rather than SGRAM -- NOTE(review): inferred from the writes
	 * below, confirm against hw docs */
	if (fbmem_peek(fb, 0) == patt + 1) {
		nv_mask(priv, NV04_PFB_BOOT_0,
			      NV04_PFB_BOOT_0_RAM_TYPE,
			      NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
		nv_mask(priv, NV04_PFB_DEBUG_0,
			      NV04_PFB_DEBUG_0_REFRESH_OFF, 0);

		for (i = 0; i < 4; i++)
			fbmem_poke(fb, 4 * i, patt);

		/* low half of the word lost -> narrower bus, less RAM */
		if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
			nv_mask(priv, NV04_PFB_BOOT_0,
				      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
				      NV04_PFB_BOOT_0_RAM_AMOUNT,
				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
	} else
	if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
		nv_mask(priv, NV04_PFB_BOOT_0,
			      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
			      NV04_PFB_BOOT_0_RAM_AMOUNT,
			      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
	} else
	if (fbmem_peek(fb, 0) != patt) {
		/* pattern at 0 corrupted: 8MBIT SGRAM; size by readback */
		if (fbmem_readback(fb, 0x800000, patt))
			nv_mask(priv, NV04_PFB_BOOT_0,
				      NV04_PFB_BOOT_0_RAM_AMOUNT,
				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
		else
			nv_mask(priv, NV04_PFB_BOOT_0,
				      NV04_PFB_BOOT_0_RAM_AMOUNT,
				      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);

		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
			      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
	} else
	if (!fbmem_readback(fb, 0x800000, patt)) {
		/* nothing past 8MiB responds -> clamp the amount */
		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
			      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);

	}

	/* Refresh on, sequencer on */
	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
	fbmem_fini(fb);
}
112
113static int
114nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
115 struct nouveau_oclass *oclass, void *data, u32 size,
116 struct nouveau_object **pobject)
117{
118 struct nv04_devinit_priv *priv;
119 int ret;
120
121 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
122 *pobject = nv_object(priv);
123 if (ret)
124 return ret;
125
126 priv->base.meminit = nv04_devinit_meminit;
127 priv->owner = -1;
128 return 0;
129}
130
/* Destructor shared by the nv04/nv05/nv10/nv1a/nv20 devinit classes:
 * hand the VGA owner register back to the value saved at first fini,
 * re-lock the extended CRTC registers, then destroy the base object.
 */
void
nv04_devinit_dtor(struct nouveau_object *object)
{
	struct nv04_devinit_priv *priv = (void *)object;

	/* restore vga owner saved at first init, and lock crtc regs */
	nv_wrvgaowner(priv, priv->owner);
	nv_lockvgac(priv, true);

	nouveau_devinit_destroy(&priv->base);
}
142
/* Init hook shared by the pre-nv50 devinit classes.  If a POST hasn't
 * been forced already, detect an un-posted adaptor by reassembling the
 * CRTC horizontal-total value from its scattered VGA register fields;
 * an all-zero htotal means no mode was ever programmed, so request a
 * vbios POST before running the common init.
 */
int
nv04_devinit_init(struct nouveau_object *object)
{
	struct nv04_devinit_priv *priv = (void *)object;

	if (!priv->base.post) {
		/* htotal bits 0-7, 8, 9, 10, 11 live in CR06/CR07/CR25/CR41 */
		u32 htotal = nv_rdvgac(priv, 0, 0x06);
		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x01) << 8;
		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x20) << 4;
		htotal |= (nv_rdvgac(priv, 0, 0x25) & 0x01) << 10;
		htotal |= (nv_rdvgac(priv, 0, 0x41) & 0x01) << 11;
		if (!htotal) {
			nv_info(priv, "adaptor not initialised\n");
			priv->base.post = true;
		}
	}

	return nouveau_devinit_init(&priv->base);
}
162
/* Fini hook shared by the pre-nv50 devinit classes.  Leaves the hardware
 * in a state where i2c and the extended CRTC registers are accessible,
 * and captures the original VGA owner the first time through so the
 * destructor can restore it.
 */
int
nv04_devinit_fini(struct nouveau_object *object, bool suspend)
{
	struct nv04_devinit_priv *priv = (void *)object;

	/* make i2c busses accessible */
	nv_mask(priv, 0x000200, 0x00000001, 0x00000001);

	/* unlock extended vga crtc regs, and unslave crtcs */
	nv_lockvgac(priv, false);
	if (priv->owner < 0)
		priv->owner = nv_rdvgaowner(priv);
	nv_wrvgaowner(priv, 0);

	return nouveau_devinit_fini(&priv->base, suspend);
}
179
/* Object-class binding for the NV04 devinit subdev. */
struct nouveau_oclass
nv04_devinit_oclass = {
	.handle = NV_SUBDEV(DEVINIT, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_devinit_ctor,
		.dtor = nv04_devinit_dtor,
		.init = nv04_devinit_init,
		.fini = nv04_devinit_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
new file mode 100644
index 000000000000..191447d0d252
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -0,0 +1,159 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/devinit.h>
28#include <subdev/bios.h>
29#include <subdev/bios/bmp.h>
30#include <subdev/vga.h>
31
32#include "fbmem.h"
33
34struct nv05_devinit_priv {
35 struct nouveau_devinit base;
36 u8 owner;
37};
38
/* NV05 memory sizing: seed PFB from the BMP vbios memory-init table
 * (falling back to hardcoded per-strap defaults), then probe actual
 * bus width and memory amount via aperture write/readback.  Register
 * ordering is hardware-mandated; do not reorder.
 */
static void
nv05_devinit_meminit(struct nouveau_devinit *devinit)
{
	/* fallback ramcfg pairs indexed by the 4-bit strap value */
	static const u8 default_config_tab[][2] = {
		{ 0x24, 0x00 },
		{ 0x28, 0x00 },
		{ 0x24, 0x01 },
		{ 0x1f, 0x00 },
		{ 0x0f, 0x00 },
		{ 0x17, 0x00 },
		{ 0x06, 0x00 },
		{ 0x00, 0x00 }
	};
	struct nv05_devinit_priv *priv = (void *)devinit;
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct io_mapping *fb;
	u32 patt = 0xdeadbeef;
	u16 data;
	u8 strap, ramcfg[2];
	int i, v;

	/* Map the framebuffer aperture */
	fb = fbmem_init(nv_device(priv)->pdev);
	if (!fb) {
		nv_error(priv, "failed to map fb\n");
		return;
	}

	/* straps select the ramcfg entry; prefer the vbios table */
	strap = (nv_rd32(priv, 0x101000) & 0x0000003c) >> 2;
	if ((data = bmp_mem_init_table(bios))) {
		ramcfg[0] = nv_ro08(bios, data + 2 * strap + 0);
		ramcfg[1] = nv_ro08(bios, data + 2 * strap + 1);
	} else {
		ramcfg[0] = default_config_tab[strap][0];
		ramcfg[1] = default_config_tab[strap][1];
	}

	/* Sequencer off */
	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);

	/* UMA boards share system memory; nothing to probe */
	if (nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
		goto out;

	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);

	/* If present load the hardcoded scrambling table */
	if (data) {
		for (i = 0, data += 0x10; i < 8; i++, data += 4) {
			u32 scramble = nv_ro32(bios, data);
			nv_wr32(priv, NV04_PFB_SCRAMBLE(i), scramble);
		}
	}

	/* Set memory type/width/length defaults depending on the straps */
	nv_mask(priv, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);

	if (ramcfg[1] & 0x80)
		nv_mask(priv, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);

	nv_mask(priv, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
	nv_mask(priv, NV04_PFB_CFG1, 0, 1);

	/* Probe memory bus width */
	for (i = 0; i < 4; i++)
		fbmem_poke(fb, 4 * i, patt);

	if (fbmem_peek(fb, 0xc) != patt)
		nv_mask(priv, NV04_PFB_BOOT_0,
			  NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);

	/* Probe memory length: step down 32MB -> 16MB -> 8MB -> 4MB,
	 * shrinking whenever a high-offset write fails to read back */
	v = nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;

	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
	    (!fbmem_readback(fb, 0x1000000, ++patt) ||
	     !fbmem_readback(fb, 0, ++patt)))
		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
			  NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);

	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
	    !fbmem_readback(fb, 0x800000, ++patt))
		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);

	if (!fbmem_readback(fb, 0x400000, ++patt))
		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
			  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);

out:
	/* Sequencer on */
	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
	fbmem_fini(fb);
}
132
133static int
134nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, void *data, u32 size,
136 struct nouveau_object **pobject)
137{
138 struct nv05_devinit_priv *priv;
139 int ret;
140
141 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
142 *pobject = nv_object(priv);
143 if (ret)
144 return ret;
145
146 priv->base.meminit = nv05_devinit_meminit;
147 return 0;
148}
149
/* Object-class binding for the NV05 devinit subdev; reuses the common
 * nv04 dtor/init/fini hooks.
 */
struct nouveau_oclass
nv05_devinit_oclass = {
	.handle = NV_SUBDEV(DEVINIT, 0x05),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv05_devinit_ctor,
		.dtor = nv04_devinit_dtor,
		.init = nv04_devinit_init,
		.fini = nv04_devinit_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
new file mode 100644
index 000000000000..eb76ffab6b0c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/devinit.h>
28#include <subdev/vga.h>
29
30#include "fbmem.h"
31
32struct nv10_devinit_priv {
33 struct nouveau_devinit base;
34 u8 owner;
35};
36
/* NV10 memory sizing: try each candidate bus-width setting until a
 * pattern written through the aperture survives, then probe the amount
 * of fitted memory by writing at the top of the reported size.
 * Register ordering is hardware-mandated; do not reorder.
 */
static void
nv10_devinit_meminit(struct nouveau_devinit *devinit)
{
	struct nv10_devinit_priv *priv = (void *)devinit;
	/* candidate CFG0 bus-width field values; the third (0x20) is
	 * only valid on chipset >= 0x17 */
	const int mem_width[] = { 0x10, 0x00, 0x20 };
	const int mem_width_count = nv_device(priv)->chipset >= 0x17 ? 3 : 2;
	uint32_t patt = 0xdeadbeef;
	struct io_mapping *fb;
	int i, j, k;

	/* Map the framebuffer aperture */
	fb = fbmem_init(nv_device(priv)->pdev);
	if (!fb) {
		nv_error(priv, "failed to map fb\n");
		return;
	}

	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);

	/* Probe memory bus width */
	for (i = 0; i < mem_width_count; i++) {
		nv_mask(priv, NV04_PFB_CFG0, 0x30, mem_width[i]);

		for (j = 0; j < 4; j++) {
			for (k = 0; k < 4; k++)
				fbmem_poke(fb, 0x1c, 0);

			fbmem_poke(fb, 0x1c, patt);
			/* write elsewhere so 0x1c isn't read from a
			 * posted-write buffer */
			fbmem_poke(fb, 0x3c, 0);

			if (fbmem_peek(fb, 0x1c) == patt)
				goto mem_width_found;
		}
	}

mem_width_found:
	patt <<= 1;

	/* Probe amount of installed memory */
	for (i = 0; i < 4; i++) {
		/* 0x10020c holds the current amount; write at its top */
		int off = nv_rd32(priv, 0x10020c) - 0x100000;

		fbmem_poke(fb, off, patt);
		fbmem_poke(fb, 0, 0);

		/* dummy reads to flush before checking the pattern */
		fbmem_peek(fb, 0);
		fbmem_peek(fb, 0);
		fbmem_peek(fb, 0);
		fbmem_peek(fb, 0);

		if (fbmem_peek(fb, off) == patt)
			goto amount_found;
	}

	/* IC missing - disable the upper half memory space. */
	nv_mask(priv, NV04_PFB_CFG0, 0x1000, 0);

amount_found:
	fbmem_fini(fb);
}
97
98static int
99nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
100 struct nouveau_oclass *oclass, void *data, u32 size,
101 struct nouveau_object **pobject)
102{
103 struct nv10_devinit_priv *priv;
104 int ret;
105
106 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
107 *pobject = nv_object(priv);
108 if (ret)
109 return ret;
110
111 priv->base.meminit = nv10_devinit_meminit;
112 return 0;
113}
114
/* Object-class binding for the NV10 devinit subdev; reuses the common
 * nv04 dtor/init/fini hooks.
 */
struct nouveau_oclass
nv10_devinit_oclass = {
	.handle = NV_SUBDEV(DEVINIT, 0x10),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv10_devinit_ctor,
		.dtor = nv04_devinit_dtor,
		.init = nv04_devinit_init,
		.fini = nv04_devinit_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
index 1d083893a4d7..5b2ba630d913 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2009 Red Hat Inc. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -18,42 +18,41 @@
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
21 */ 23 */
22 24
23#ifndef __NOUVEAU_I2C_H__ 25#include <subdev/devinit.h>
24#define __NOUVEAU_I2C_H__ 26#include <subdev/vga.h>
25
26#include <linux/i2c.h>
27#include <linux/i2c-algo-bit.h>
28#include "drm_dp_helper.h"
29
30#define NV_I2C_PORT(n) (0x00 + (n))
31#define NV_I2C_PORT_NUM 0x10
32#define NV_I2C_DEFAULT(n) (0x80 + (n))
33 27
34struct nouveau_i2c_chan { 28struct nv1a_devinit_priv {
35 struct i2c_adapter adapter; 29 struct nouveau_devinit base;
36 struct drm_device *dev; 30 u8 owner;
37 struct i2c_algo_bit_data bit;
38 struct list_head head;
39 u8 index;
40 u8 type;
41 u32 dcb;
42 u32 drive;
43 u32 sense;
44 u32 state;
45}; 31};
46 32
47int nouveau_i2c_init(struct drm_device *); 33static int
48void nouveau_i2c_fini(struct drm_device *); 34nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
49struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index); 35 struct nouveau_oclass *oclass, void *data, u32 size,
50bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr); 36 struct nouveau_object **pobject)
51int nouveau_i2c_identify(struct drm_device *dev, const char *what, 37{
52 struct i2c_board_info *info, 38 struct nv1a_devinit_priv *priv;
53 bool (*match)(struct nouveau_i2c_chan *, 39 int ret;
54 struct i2c_board_info *),
55 int index);
56 40
57extern const struct i2c_algorithm nouveau_dp_i2c_algo; 41 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
58 45
59#endif /* __NOUVEAU_I2C_H__ */ 46 return 0;
47}
48
49struct nouveau_oclass
50nv1a_devinit_oclass = {
51 .handle = NV_SUBDEV(DEVINIT, 0x1a),
52 .ofuncs = &(struct nouveau_ofuncs) {
53 .ctor = nv1a_devinit_ctor,
54 .dtor = nv04_devinit_dtor,
55 .init = nv04_devinit_init,
56 .fini = nv04_devinit_fini,
57 },
58};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
new file mode 100644
index 000000000000..eb32e99005e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/devinit.h>
28#include <subdev/vga.h>
29
30#include "fbmem.h"
31
32struct nv20_devinit_priv {
33 struct nouveau_devinit base;
34 u8 owner;
35};
36
/* NV20 memory sizing: enable full addressing, write markers near every
 * 32MiB boundary, and shrink the address space if the top marker does
 * not read back (indicating a missing memory IC).
 */
static void
nv20_devinit_meminit(struct nouveau_devinit *devinit)
{
	struct nv20_devinit_priv *priv = (void *)devinit;
	struct nouveau_device *device = nv_device(priv);
	/* CFG0 full-addressing bits differ between nv25+ and older */
	uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
	uint32_t amount, off;
	struct io_mapping *fb;

	/* Map the framebuffer aperture */
	fb = fbmem_init(nv_device(priv)->pdev);
	if (!fb) {
		nv_error(priv, "failed to map fb\n");
		return;
	}

	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);

	/* Allow full addressing */
	nv_mask(priv, NV04_PFB_CFG0, 0, mask);

	/* 0x10020c reports the current memory amount */
	amount = nv_rd32(priv, 0x10020c);
	for (off = amount; off > 0x2000000; off -= 0x2000000)
		fbmem_poke(fb, off - 4, off);

	amount = nv_rd32(priv, 0x10020c);
	if (amount != fbmem_peek(fb, amount - 4))
		/* IC missing - disable the upper half memory space. */
		nv_mask(priv, NV04_PFB_CFG0, mask, 0);

	fbmem_fini(fb);
}
69
70static int
71nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
72 struct nouveau_oclass *oclass, void *data, u32 size,
73 struct nouveau_object **pobject)
74{
75 struct nv20_devinit_priv *priv;
76 int ret;
77
78 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
79 *pobject = nv_object(priv);
80 if (ret)
81 return ret;
82
83 priv->base.meminit = nv20_devinit_meminit;
84 return 0;
85}
86
/* Object-class binding for the NV20 devinit subdev; reuses the common
 * nv04 dtor/init/fini hooks.
 */
struct nouveau_oclass
nv20_devinit_oclass = {
	.handle = NV_SUBDEV(DEVINIT, 0x20),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv20_devinit_ctor,
		.dtor = nv04_devinit_dtor,
		.init = nv04_devinit_init,
		.fini = nv04_devinit_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
new file mode 100644
index 000000000000..61becfa732e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/devinit.h>
26#include <subdev/vga.h>
27
/* nv50 needs no software memory setup, so no extra state beyond the
 * common devinit base.
 */
struct nv50_devinit_priv {
	struct nouveau_devinit base;
};
31
32static int
33nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
34 struct nouveau_oclass *oclass, void *data, u32 size,
35 struct nouveau_object **pobject)
36{
37 struct nv50_devinit_priv *priv;
38 int ret;
39
40 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
41 *pobject = nv_object(priv);
42 if (ret)
43 return ret;
44
45 return 0;
46}
47
/* Destructor: nothing nv50-specific to release, destroy the base. */
static void
nv50_devinit_dtor(struct nouveau_object *object)
{
	struct nv50_devinit_priv *priv = (void *)object;
	nouveau_devinit_destroy(&priv->base);
}
54
/* Init hook: if a POST hasn't been forced already, treat zeroed CRTC
 * registers CR00 and CR1A as evidence of an un-posted adaptor and
 * request a vbios POST before running the common init.
 */
static int
nv50_devinit_init(struct nouveau_object *object)
{
	struct nv50_devinit_priv *priv = (void *)object;

	if (!priv->base.post) {
		if (!nv_rdvgac(priv, 0, 0x00) &&
		    !nv_rdvgac(priv, 0, 0x1a)) {
			nv_info(priv, "adaptor not initialised\n");
			priv->base.post = true;
		}
	}

	return nouveau_devinit_init(&priv->base);
}
70
/* Fini hook: nothing nv50-specific, defer to the common fini. */
static int
nv50_devinit_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_devinit_priv *priv = (void *)object;
	return nouveau_devinit_fini(&priv->base, suspend);
}
77
/* Object-class binding for the NV50 devinit subdev. */
struct nouveau_oclass
nv50_devinit_oclass = {
	.handle = NV_SUBDEV(DEVINIT, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_devinit_ctor,
		.dtor = nv50_devinit_dtor,
		.init = nv50_devinit_init,
		.fini = nv50_devinit_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
new file mode 100644
index 000000000000..f0086de8af31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -0,0 +1,130 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "subdev/fb.h"
26#include "subdev/bios.h"
27#include "subdev/bios/bit.h"
28
29int
30nouveau_fb_bios_memtype(struct nouveau_bios *bios)
31{
32 struct bit_entry M;
33 u8 ramcfg;
34
35 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
36 if (!bit_entry(bios, 'M', &M) && M.version == 2 && M.length >= 5) {
37 u16 table = nv_ro16(bios, M.offset + 3);
38 u8 version = nv_ro08(bios, table + 0);
39 u8 header = nv_ro08(bios, table + 1);
40 u8 record = nv_ro08(bios, table + 2);
41 u8 entries = nv_ro08(bios, table + 3);
42 if (table && version == 0x10 && ramcfg < entries) {
43 u16 entry = table + header + (ramcfg * record);
44 switch (nv_ro08(bios, entry) & 0x0f) {
45 case 0: return NV_MEM_TYPE_DDR2;
46 case 1: return NV_MEM_TYPE_DDR3;
47 case 2: return NV_MEM_TYPE_GDDR3;
48 case 3: return NV_MEM_TYPE_GDDR5;
49 default:
50 break;
51 }
52
53 }
54 }
55
56 return NV_MEM_TYPE_UNKNOWN;
57}
58
59int
60nouveau_fb_init(struct nouveau_fb *pfb)
61{
62 int ret, i;
63
64 ret = nouveau_subdev_init(&pfb->base);
65 if (ret)
66 return ret;
67
68 for (i = 0; i < pfb->tile.regions; i++)
69 pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
70
71 return 0;
72}
73
/* nouveau_object init hook: forwards to nouveau_fb_init(). */
int
_nouveau_fb_init(struct nouveau_object *object)
{
	struct nouveau_fb *pfb = (void *)object;
	return nouveau_fb_init(pfb);
}
80
/* Tear down the fb subdev: release tile regions, then the tag/vram
 * allocators (a non-zero block_size is the "was initialised" marker
 * used here), and finally the base subdev.
 */
void
nouveau_fb_destroy(struct nouveau_fb *pfb)
{
	int i;

	for (i = 0; i < pfb->tile.regions; i++)
		pfb->tile.fini(pfb, i, &pfb->tile.region[i]);

	if (pfb->tags.block_size)
		nouveau_mm_fini(&pfb->tags);

	if (pfb->vram.block_size)
		nouveau_mm_fini(&pfb->vram);

	nouveau_subdev_destroy(&pfb->base);
}
97
/* nouveau_object destructor hook: forwards to nouveau_fb_destroy(). */
void
_nouveau_fb_dtor(struct nouveau_object *object)
{
	struct nouveau_fb *pfb = (void *)object;
	nouveau_fb_destroy(pfb);
}
104
105int
106nouveau_fb_created(struct nouveau_fb *pfb)
107{
108 static const char *name[] = {
109 [NV_MEM_TYPE_UNKNOWN] = "unknown",
110 [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
111 [NV_MEM_TYPE_SGRAM ] = "SGRAM",
112 [NV_MEM_TYPE_SDRAM ] = "SDRAM",
113 [NV_MEM_TYPE_DDR1 ] = "DDR1",
114 [NV_MEM_TYPE_DDR2 ] = "DDR2",
115 [NV_MEM_TYPE_DDR3 ] = "DDR3",
116 [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
117 [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
118 [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
119 [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
120 };
121
122 if (pfb->ram.size == 0) {
123 nv_fatal(pfb, "no vram detected!!\n");
124 return -ERANGE;
125 }
126
127 nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
128 nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
129 return 0;
130}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
new file mode 100644
index 000000000000..eb06836b69f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -0,0 +1,130 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/fb.h>
26
27#define NV04_PFB_BOOT_0 0x00100000
28# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
29# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000
30# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001
31# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002
32# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003
33# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004
34# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028
35# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000
36# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008
37# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010
38# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018
39# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020
40# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028
41# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100
42# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000
43#define NV04_PFB_CFG0 0x00100200
44
/* Private instance data for the NV04-generation PFB subdev. */
struct nv04_fb_priv {
	struct nouveau_fb base;
};
48
49bool
50nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
51{
52 if (!(tile_flags & 0xff00))
53 return true;
54
55 return false;
56}
57
/* Hardware init for NV04 PFB: common FB init plus a PFB_CFG0 write
 * inherited from the old DDX driver.
 */
static int
nv04_fb_init(struct nouveau_object *object)
{
	struct nv04_fb_priv *priv = (void *)object;
	int ret;

	ret = nouveau_fb_init(&priv->base);
	if (ret)
		return ret;

	/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
	 * nvidia reading PFB_CFG_0, then writing back its original value.
	 * (which was 0x701114 in this case)
	 */
	nv_wr32(priv, NV04_PFB_CFG0, 0x1114);
	return 0;
}
75
76static int
77nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
78 struct nouveau_oclass *oclass, void *data, u32 size,
79 struct nouveau_object **pobject)
80{
81 struct nv04_fb_priv *priv;
82 u32 boot0;
83 int ret;
84
85 ret = nouveau_fb_create(parent, engine, oclass, &priv);
86 *pobject = nv_object(priv);
87 if (ret)
88 return ret;
89
90 boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
91 if (boot0 & 0x00000100) {
92 priv->base.ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
93 priv->base.ram.size *= 1024 * 1024;
94 } else {
95 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
96 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
97 priv->base.ram.size = 32 * 1024 * 1024;
98 break;
99 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
100 priv->base.ram.size = 16 * 1024 * 1024;
101 break;
102 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
103 priv->base.ram.size = 8 * 1024 * 1024;
104 break;
105 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
106 priv->base.ram.size = 4 * 1024 * 1024;
107 break;
108 }
109 }
110
111 if ((boot0 & 0x00000038) <= 0x10)
112 priv->base.ram.type = NV_MEM_TYPE_SGRAM;
113 else
114 priv->base.ram.type = NV_MEM_TYPE_SDRAM;
115
116
117 priv->base.memtype_valid = nv04_fb_memtype_valid;
118 return nouveau_fb_created(&priv->base);
119}
120
/* Subdev class description for the NV04-generation PFB. */
struct nouveau_oclass
nv04_fb_oclass = {
	.handle = NV_SUBDEV(FB, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_fb_ctor,
		.dtor = _nouveau_fb_dtor,
		.init = nv04_fb_init,
		.fini = _nouveau_fb_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
new file mode 100644
index 000000000000..f037a422d2f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -0,0 +1,120 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
/* Private instance data for the NV10-generation PFB subdev. */
struct nv10_fb_priv {
	struct nouveau_fb base;
};
32
33static void
34nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35 u32 flags, struct nouveau_fb_tile *tile)
36{
37 tile->addr = 0x80000000 | addr;
38 tile->limit = max(1u, addr + size) - 1;
39 tile->pitch = pitch;
40}
41
42static void
43nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
44{
45 tile->addr = 0;
46 tile->limit = 0;
47 tile->pitch = 0;
48 tile->zcomp = 0;
49}
50
/* Push cached tile-region state into the hardware.  The address register
 * is written last since it carries the enable bit (set in tile_init),
 * so limit/pitch are valid before the region goes live.
 */
void
nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
}
58
59static int
60nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
61 struct nouveau_oclass *oclass, void *data, u32 size,
62 struct nouveau_object **pobject)
63{
64 struct nouveau_device *device = nv_device(parent);
65 struct nv10_fb_priv *priv;
66 int ret;
67
68 ret = nouveau_fb_create(parent, engine, oclass, &priv);
69 *pobject = nv_object(priv);
70 if (ret)
71 return ret;
72
73 if (device->chipset == 0x1a || device->chipset == 0x1f) {
74 struct pci_dev *bridge;
75 u32 mem, mib;
76
77 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
78 if (!bridge) {
79 nv_fatal(device, "no bridge device\n");
80 return 0;
81 }
82
83 if (device->chipset == 0x1a) {
84 pci_read_config_dword(bridge, 0x7c, &mem);
85 mib = ((mem >> 6) & 31) + 1;
86 } else {
87 pci_read_config_dword(bridge, 0x84, &mem);
88 mib = ((mem >> 4) & 127) + 1;
89 }
90
91 priv->base.ram.type = NV_MEM_TYPE_STOLEN;
92 priv->base.ram.size = mib * 1024 * 1024;
93 } else {
94 u32 cfg0 = nv_rd32(priv, 0x100200);
95 if (cfg0 & 0x00000001)
96 priv->base.ram.type = NV_MEM_TYPE_DDR1;
97 else
98 priv->base.ram.type = NV_MEM_TYPE_SDRAM;
99
100 priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
101 }
102
103 priv->base.memtype_valid = nv04_fb_memtype_valid;
104 priv->base.tile.regions = 8;
105 priv->base.tile.init = nv10_fb_tile_init;
106 priv->base.tile.fini = nv10_fb_tile_fini;
107 priv->base.tile.prog = nv10_fb_tile_prog;
108 return nouveau_fb_created(&priv->base);
109}
110
/* Subdev class description for the NV10-generation PFB. */
struct nouveau_oclass
nv10_fb_oclass = {
	.handle = NV_SUBDEV(FB, 0x10),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv10_fb_ctor,
		.dtor = _nouveau_fb_dtor,
		.init = _nouveau_fb_init,
		.fini = _nouveau_fb_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
new file mode 100644
index 000000000000..4b3578fcb7fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -0,0 +1,136 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
/* Private instance data for the NV20-generation PFB subdev. */
struct nv20_fb_priv {
	struct nouveau_fb base;
};
32
/* Set up one tile region; when flags bit 2 is set, additionally try to
 * allocate on-die tag memory and enable Z compression for the region.
 * The zcomp encoding differs between pre- and post-0x25 chipsets.
 */
static void
nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
		  u32 flags, struct nouveau_fb_tile *tile)
{
	struct nouveau_device *device = nv_device(pfb);
	int bpp = (flags & 2) ? 32 : 16;	/* flags bit 1 selects 32bpp */

	tile->addr = 0x00000001 | addr;		/* bit 0: region enable */
	tile->limit = max(1u, addr + size) - 1;
	tile->pitch = pitch;

	/* Allocate some of the on-die tag memory, used to store Z
	 * compression meta-data (most likely just a bitmap determining
	 * if a given tile is compressed or not).
	 */
	size /= 256;	/* one tag per 256 bytes of the region */
	if (flags & 4) {
		if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
			/* Enable Z compression */
			tile->zcomp = tile->tag->offset;
			if (device->chipset >= 0x25) {
				if (bpp == 16)
					tile->zcomp |= 0x00100000;
				else
					tile->zcomp |= 0x00200000;
			} else {
				tile->zcomp |= 0x80000000;
				if (bpp != 16)
					tile->zcomp |= 0x04000000;
			}
		}

		/* bit 1 of the address word marks a Z/compressed region */
		tile->addr |= 2;
	}
}
68
69static void
70nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
71{
72 tile->addr = 0;
73 tile->limit = 0;
74 tile->pitch = 0;
75 tile->zcomp = 0;
76 nouveau_mm_free(&pfb->tags, &tile->tag);
77}
78
/* Push cached tile state into the hardware, including the per-region
 * zcomp register.  The address register is written after limit/pitch
 * since it carries the enable bit.
 */
static void
nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
}
87
/* Constructor for the NV20 PFB subdev: detects RAM type from PBUS
 * strap register 0x1218, reads the VRAM size, and sets up the
 * compression-tag allocator (larger on >= 0x25 chipsets).
 */
static int
nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nv20_fb_priv *priv;
	u32 pbus1218;
	int ret;

	ret = nouveau_fb_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* RAM type strap in bits 8..9 of PBUS 0x1218 */
	pbus1218 = nv_rd32(priv, 0x001218);
	switch (pbus1218 & 0x00000300) {
	case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
	case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
	case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
	case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
	}
	/* size lives in the upper byte(s) of 0x10020c */
	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;

	if (device->chipset >= 0x25)
		ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
	else
		ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
	if (ret)
		return ret;

	priv->base.memtype_valid = nv04_fb_memtype_valid;
	priv->base.tile.regions = 8;
	priv->base.tile.init = nv20_fb_tile_init;
	priv->base.tile.fini = nv20_fb_tile_fini;
	priv->base.tile.prog = nv20_fb_tile_prog;
	return nouveau_fb_created(&priv->base);
}
126
/* Subdev class description for the NV20-generation PFB. */
struct nouveau_oclass
nv20_fb_oclass = {
	.handle = NV_SUBDEV(FB, 0x20),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv20_fb_ctor,
		.dtor = _nouveau_fb_dtor,
		.init = _nouveau_fb_init,
		.fini = _nouveau_fb_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
new file mode 100644
index 000000000000..cba67bc91390
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -0,0 +1,148 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
/* Private instance data for the NV30-generation PFB subdev. */
struct nv30_fb_priv {
	struct nouveau_fb base;
};
32
33void
34nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35 u32 flags, struct nouveau_fb_tile *tile)
36{
37 tile->addr = addr | 1;
38 tile->limit = max(1u, addr + size) - 1;
39 tile->pitch = pitch;
40}
41
42void
43nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
44{
45 tile->addr = 0;
46 tile->limit = 0;
47 tile->pitch = 0;
48}
49
/* Extract one signed 4-bit bias nibble from the 0x122c timing registers
 * (chipsets newer than 0x30 only; older parts use bias 0) and scale it
 * by 2.  The nibble is sign-extended from 4 bits before scaling.
 */
static int
calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
{
	struct nouveau_device *device = nv_device(priv);
	int b = (device->chipset > 0x30 ?
		 nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
		 0) & 0xf;

	/* manual 4-bit sign-extension, then double */
	return 2 * (b & 0x8 ? b - 0x10 : b);
}
60
/* Build one 32-bit refresh/timing word: four bytes, each the base value
 * from 'l' (byte i) biased per calc_bias(), clamped to [0, 0x1f] and
 * tagged with bit 7.
 */
static int
calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
{
	int word = 0;
	int j;

	for (j = 0; j < 4; j++) {
		int base = (l >> (8 * i)) & 0xff;
		int m = base + calc_bias(priv, k, i, j);

		word |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
	}

	return word;
}
74
/* Hardware init for NV30 PFB: common FB init, then (on 0x30/0x31/0x35
 * only) derive and program the memory timing registers at
 * 0x10037c/0x1003ac from the reference value in 0x1003d0.
 */
static int
nv30_fb_init(struct nouveau_object *object)
{
	struct nouveau_device *device = nv_device(object);
	struct nv30_fb_priv *priv = (void *)object;
	int ret, i, j;

	ret = nouveau_fb_init(&priv->base);
	if (ret)
		return ret;

	/* Init the memory timing regs at 0x10037c/0x1003ac */
	if (device->chipset == 0x30 ||
	    device->chipset == 0x31 ||
	    device->chipset == 0x35) {
		/* Related to ROP count */
		int n = (device->chipset == 0x31 ? 2 : 4);
		int l = nv_rd32(priv, 0x1003d0);

		for (i = 0; i < n; i++) {
			/* three words per unit at 0x10037c, two at 0x1003ac */
			for (j = 0; j < 3; j++)
				nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j,
					calc_ref(priv, l, 0, j));

			for (j = 0; j < 2; j++)
				nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j,
					calc_ref(priv, l, 1, j));
		}
	}

	return 0;
}
107
/* Constructor for the NV30 PFB subdev: RAM type from the PBUS 0x1218
 * strap, size from 0x10020c; tile hooks reuse the NV10 prog routine.
 */
static int
nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nv30_fb_priv *priv;
	u32 pbus1218;
	int ret;

	ret = nouveau_fb_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* RAM type strap in bits 8..9 of PBUS 0x1218 */
	pbus1218 = nv_rd32(priv, 0x001218);
	switch (pbus1218 & 0x00000300) {
	case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
	case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
	case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
	case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
	}
	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;

	priv->base.memtype_valid = nv04_fb_memtype_valid;
	priv->base.tile.regions = 8;
	priv->base.tile.init = nv30_fb_tile_init;
	priv->base.tile.fini = nv30_fb_tile_fini;
	priv->base.tile.prog = nv10_fb_tile_prog;
	return nouveau_fb_created(&priv->base);
}
138
/* Subdev class description for the NV30-generation PFB. */
struct nouveau_oclass
nv30_fb_oclass = {
	.handle = NV_SUBDEV(FB, 0x30),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv30_fb_ctor,
		.dtor = _nouveau_fb_dtor,
		.init = nv30_fb_init,
		.fini = _nouveau_fb_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
new file mode 100644
index 000000000000..347a496fcad8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -0,0 +1,178 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
/* Private instance data for the NV40-generation PFB subdev. */
struct nv40_fb_priv {
	struct nouveau_fb base;
};
32
33static inline int
34nv44_graph_class(struct nouveau_device *device)
35{
36 if ((device->chipset & 0xf0) == 0x60)
37 return 1;
38
39 return !(0x0baf & (1 << (device->chipset & 0x0f)));
40}
41
/* Push cached tile state into the hardware; NV40 moved the tile-region
 * registers to the 0x100600 range.  The address register is written
 * last as it carries the enable bit.
 */
static void
nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
{
	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
}
49
/* Enable the on-chip GART on NV40-class boards (single enable write). */
static void
nv40_fb_init_gart(struct nv40_fb_priv *priv)
{
	nv_wr32(priv, 0x100800, 0x00000001);
}
55
/* Enable the on-chip GART on NV44-class boards; 0x100850 must be
 * written before the enable bit in 0x100800.
 */
static void
nv44_fb_init_gart(struct nv40_fb_priv *priv)
{
	nv_wr32(priv, 0x100850, 0x80000000);
	nv_wr32(priv, 0x100800, 0x00000001);
}
62
/* Hardware init for NV40 PFB: common FB init, then either a 0x10033c
 * tweak (nv40/nv45) or GART bring-up appropriate for the chipset class.
 */
static int
nv40_fb_init(struct nouveau_object *object)
{
	struct nv40_fb_priv *priv = (void *)object;
	int ret;

	ret = nouveau_fb_init(&priv->base);
	if (ret)
		return ret;

	switch (nv_device(priv)->chipset) {
	case 0x40:
	case 0x45:
		/* no on-chip GART on these; clear bit 15 of 0x10033c */
		nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
		break;
	default:
		if (nv44_graph_class(nv_device(priv)))
			nv44_fb_init_gart(priv);
		else
			nv40_fb_init_gart(priv);
		break;
	}

	return 0;
}
88
/* Constructor for the NV40 PFB subdev.  RAM-type detection varies by
 * chipset: nv40 uses the PBUS 0x1218 strap, nv49/nv4b use PFB 0x100914,
 * nv4e is an IGP using stolen memory, and everything else reads
 * PFB 0x100474.  Tile-region count and prog routine also vary.
 */
static int
nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nv40_fb_priv *priv;
	int ret;

	ret = nouveau_fb_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* 0x001218 is actually present on a few other NV4X I looked at,
	 * and even contains sane values matching 0x100474. From looking
	 * at various vbios images however, this isn't the case everywhere.
	 * So, I chose to use the same regs I've seen NVIDIA reading around
	 * the memory detection, hopefully that'll get us the right numbers
	 */
	if (device->chipset == 0x40) {
		u32 pbus1218 = nv_rd32(priv, 0x001218);
		switch (pbus1218 & 0x00000300) {
		case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
		case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
		case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
		case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
		}
	} else
	if (device->chipset == 0x49 || device->chipset == 0x4b) {
		u32 pfb914 = nv_rd32(priv, 0x100914);
		switch (pfb914 & 0x00000003) {
		case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
		case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
		case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
		case 0x00000003: break;	/* type left as default (unknown) */
		}
	} else
	if (device->chipset != 0x4e) {
		u32 pfb474 = nv_rd32(priv, 0x100474);
		if (pfb474 & 0x00000004)
			priv->base.ram.type = NV_MEM_TYPE_GDDR3;
		if (pfb474 & 0x00000002)
			priv->base.ram.type = NV_MEM_TYPE_DDR2;
		if (pfb474 & 0x00000001)
			priv->base.ram.type = NV_MEM_TYPE_DDR1;
	} else {
		/* nv4e is an IGP, memory is stolen from the host */
		priv->base.ram.type = NV_MEM_TYPE_STOLEN;
	}

	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;

	priv->base.memtype_valid = nv04_fb_memtype_valid;
	/* number of tile regions depends on the chipset */
	switch (device->chipset) {
	case 0x40:
	case 0x45:
		priv->base.tile.regions = 8;
		break;
	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b:
	case 0x4c:
		priv->base.tile.regions = 15;
		break;
	default:
		priv->base.tile.regions = 12;
		break;
	}
	priv->base.tile.init = nv30_fb_tile_init;
	priv->base.tile.fini = nv30_fb_tile_fini;
	/* nv40 still has the tile regs at the NV10 location */
	if (device->chipset == 0x40)
		priv->base.tile.prog = nv10_fb_tile_prog;
	else
		priv->base.tile.prog = nv40_fb_tile_prog;

	return nouveau_fb_created(&priv->base);
}
167
168
/* Subdev class description for the NV40-generation PFB. */
struct nouveau_oclass
nv40_fb_oclass = {
	.handle = NV_SUBDEV(FB, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_fb_ctor,
		.dtor = _nouveau_fb_dtor,
		.init = nv40_fb_init,
		.fini = _nouveau_fb_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
new file mode 100644
index 000000000000..436e9efe7ef5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -0,0 +1,498 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/enum.h>
27
28#include <subdev/fb.h>
29#include <subdev/bios.h>
30
/* Private instance data for the NV50-generation PFB subdev.  The
 * r100c08 scratch page is DMA-mapped and pointed at by register
 * 0x100c08 during init (see nv50_fb_init).
 */
struct nv50_fb_priv {
	struct nouveau_fb base;
	struct page *r100c08_page;	/* scratch page backing 0x100c08 */
	dma_addr_t r100c08;		/* its bus address */
};
36
/* Allocation class per NV50 memory-type index (0..0x7f): 0 = invalid
 * type, non-zero values select the nouveau_mm allocation type used by
 * nv50_fb_vram_new().
 */
static int types[0x80] = {
	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
};
47
48static bool
49nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
50{
51 return types[(memtype & 0xff00) >> 8] != 0;
52}
53
/* Allocate a VRAM object of 'size' bytes (alignment 'align', minimum
 * contiguous chunk 'ncmin'; all in bytes, converted to 4KiB pages).
 * memtype encodes: bits 0..6 type index, bits 8..9 compression level,
 * bit 11 allocate from the top of VRAM.  On success *pmem holds the
 * new nouveau_mem; on failure partially-allocated regions are released.
 */
static int
nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
		 u32 memtype, struct nouveau_mem **pmem)
{
	struct nv50_fb_priv *priv = (void *)pfb;
	struct nouveau_mm *heap = &priv->base.vram;
	struct nouveau_mm *tags = &priv->base.tags;
	struct nouveau_mm_node *r;
	struct nouveau_mem *mem;
	int comp = (memtype & 0x300) >> 8;
	int type = (memtype & 0x07f);
	int back = (memtype & 0x800);
	int min, max, ret;

	/* convert all sizes to 4KiB page units */
	max = (size >> 12);
	min = ncmin ? (ncmin >> 12) : max;
	align >>= 12;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mutex_lock(&pfb->base.mutex);
	if (comp) {
		/* compression tags only possible with 64KiB alignment;
		 * silently fall back to uncompressed otherwise
		 */
		if (align == 16) {
			int n = (max >> 4) * comp;

			ret = nouveau_mm_head(tags, 1, n, n, 1, &mem->tag);
			if (ret)
				mem->tag = NULL;
		}

		if (unlikely(!mem->tag))
			comp = 0;
	}

	INIT_LIST_HEAD(&mem->regions);
	mem->memtype = (comp << 7) | type;
	mem->size = max;

	/* map external type index to the allocator's internal type */
	type = types[type];
	do {
		if (back)
			ret = nouveau_mm_tail(heap, type, max, min, align, &r);
		else
			ret = nouveau_mm_head(heap, type, max, min, align, &r);
		if (ret) {
			/* unlock first: ram.put re-takes the mutex */
			mutex_unlock(&pfb->base.mutex);
			pfb->ram.put(pfb, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		max -= r->length;
	} while (max);
	mutex_unlock(&pfb->base.mutex);

	/* offset of the object == offset of its first region */
	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
	mem->offset = (u64)r->offset << 12;
	*pmem = mem;
	return 0;
}
116
/* Release a VRAM object previously returned by nv50_fb_vram_new():
 * frees every region and any compression tags, then the object itself.
 * *pmem is cleared; passing an already-NULL object is a no-op.
 */
void
nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
	struct nv50_fb_priv *priv = (void *)pfb;
	struct nouveau_mm_node *this;
	struct nouveau_mem *mem;

	mem = *pmem;
	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

	mutex_lock(&pfb->base.mutex);
	while (!list_empty(&mem->regions)) {
		this = list_first_entry(&mem->regions, typeof(*this), rl_entry);

		list_del(&this->rl_entry);
		nouveau_mm_free(&priv->base.vram, &this);
	}

	/* safe even when no tags were allocated (mem->tag == NULL) */
	nouveau_mm_free(&priv->base.tags, &mem->tag);
	mutex_unlock(&pfb->base.mutex);

	kfree(mem);
}
142
/* Compute the VRAM reordering-block size in bytes from the memory
 * controller configuration: row size = partitions * banks * columns * 8,
 * tripled when 0x100250 bit 0 indicates a 3-way interleave.  Also
 * cross-checks the predicted total against the detected VRAM size.
 */
static u32
nv50_vram_rblock(struct nv50_fb_priv *priv)
{
	int i, parts, colbits, rowbitsa, rowbitsb, banks;
	u64 rowsize, predicted;
	u32 r0, r4, rt, ru, rblock_size;

	r0 = nv_rd32(priv, 0x100200);
	r4 = nv_rd32(priv, 0x100204);
	rt = nv_rd32(priv, 0x100250);
	ru = nv_rd32(priv, 0x001540);
	nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);

	/* count enabled memory partitions (bits 16..23 of 0x1540) */
	for (i = 0, parts = 0; i < 8; i++) {
		if (ru & (0x00010000 << i))
			parts++;
	}

	colbits  =  (r4 & 0x0000f000) >> 12;
	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
	banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);

	rowsize = parts * banks * (1 << colbits) * 8;
	predicted = rowsize << rowbitsa;
	/* bit 2 of 0x100200: second rank present */
	if (r0 & 0x00000004)
		predicted += rowsize << rowbitsb;

	if (predicted != priv->base.ram.size) {
		nv_warn(priv, "memory controller reports %d MiB VRAM\n",
			(u32)(priv->base.ram.size >> 20));
	}

	rblock_size = rowsize;
	if (rt & 1)
		rblock_size *= 3;

	nv_debug(priv, "rblock %d bytes\n", rblock_size);
	return rblock_size;
}
183
/* Constructor for the NV50 PFB subdev: detects RAM type (consulting the
 * vbios to disambiguate DDR2 vs DDR3), reads the 40-bit VRAM size,
 * sets up compression-tag and VRAM allocators (IGPs get no reordering),
 * and maps the 0x100c08 scratch page.
 */
static int
nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nouveau_bios *bios = nouveau_bios(device);
	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
	struct nv50_fb_priv *priv;
	u32 tags;
	int ret;

	ret = nouveau_fb_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	switch (nv_rd32(priv, 0x100714) & 0x00000007) {
	case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
	case 1:
		/* strap value 1 is ambiguous; ask the vbios */
		if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
			priv->base.ram.type = NV_MEM_TYPE_DDR3;
		else
			priv->base.ram.type = NV_MEM_TYPE_DDR2;
		break;
	case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
	case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
	case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
	default:
		break;
	}

	/* 0x10020c: bits 8..31 are size bits 8..31, low byte holds
	 * size bits 32..39 — relocate it above bit 31.
	 */
	priv->base.ram.size = nv_rd32(priv, 0x10020c);
	priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
			      ((priv->base.ram.size & 0x000000ff) << 32);

	tags = nv_rd32(priv, 0x100320);
	if (tags) {
		ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
		if (ret)
			return ret;

		nv_debug(priv, "%d compression tags\n", tags);
	}

	/* reuse 'size' as the usable page count for the vram allocator */
	size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
	switch (device->chipset) {
	case 0xaa:
	case 0xac:
	case 0xaf: /* IGPs, no reordering, no real VRAM */
		ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
		if (ret)
			return ret;

		priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
		break;
	default:
		ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
				      nv50_vram_rblock(priv) >> 12);
		if (ret)
			return ret;

		priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
		break;
	}

	/* scratch page for 0x100c08; failure is non-fatal, only warned */
	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (priv->r100c08_page) {
		priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
					     0, PAGE_SIZE,
					     PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(device->pdev, priv->r100c08))
			nv_warn(priv, "failed 0x100c08 page map\n");
	} else {
		nv_warn(priv, "failed 0x100c08 page alloc\n");
	}

	priv->base.memtype_valid = nv50_fb_memtype_valid;
	priv->base.ram.get = nv50_fb_vram_new;
	priv->base.ram.put = nv50_fb_vram_del;
	return nouveau_fb_created(&priv->base);
}
267
/* Destructor for the NV50 PFB subdev: unmap and free the 0x100c08
 * scratch page, tear down the vram allocator, then common FB teardown
 * (which also frees the tags allocator).
 */
static void
nv50_fb_dtor(struct nouveau_object *object)
{
	struct nouveau_device *device = nv_device(object);
	struct nv50_fb_priv *priv = (void *)object;

	if (priv->r100c08_page) {
		pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(priv->r100c08_page);
	}

	nouveau_mm_fini(&priv->base.vram);
	nouveau_fb_destroy(&priv->base);
}
283
/* Hardware init for NV50 PFB: common FB init, point 0x100c08 at the
 * scratch page, and program the chipset-specific 0x100c90 value.
 */
static int
nv50_fb_init(struct nouveau_object *object)
{
	struct nouveau_device *device = nv_device(object);
	struct nv50_fb_priv *priv = (void *)object;
	int ret;

	ret = nouveau_fb_init(&priv->base);
	if (ret)
		return ret;

	/* Not a clue what this is exactly. Without pointing it at a
	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
	 * cause IOMMU "read from address 0" errors (rh#561267)
	 */
	nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);

	/* This is needed to get meaningful information from 100c90
	 * on traps. No idea what these values mean exactly. */
	switch (device->chipset) {
	case 0x50:
		nv_wr32(priv, 0x100c90, 0x000707ff);
		break;
	case 0xa3:
	case 0xa5:
	case 0xa8:
		nv_wr32(priv, 0x100c90, 0x000d0fff);
		break;
	case 0xaf:
		nv_wr32(priv, 0x100c90, 0x089d1fff);
		break;
	default:
		nv_wr32(priv, 0x100c90, 0x001d07ff);
		break;
	}

	return 0;
}
322
/* Object class binding the nv50 FB subdev implementation into the core. */
struct nouveau_oclass
nv50_fb_oclass = {
	.handle = NV_SUBDEV(FB, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_fb_ctor,
		.dtor = nv50_fb_dtor,
		.init = nv50_fb_init,
		.fini = _nouveau_fb_fini,	/* no nv50-specific fini needed */
	},
};
333
/* Names for the DISPATCH client's sub-client field in VM fault status. */
static const struct nouveau_enum vm_dispatch_subclients[] = {
	{ 0x00000000, "GRCTX", NULL },
	{ 0x00000001, "NOTIFY", NULL },
	{ 0x00000002, "QUERY", NULL },
	{ 0x00000003, "COND", NULL },
	{ 0x00000004, "M2M_IN", NULL },
	{ 0x00000005, "M2M_OUT", NULL },
	{ 0x00000006, "M2M_NOTIFY", NULL },
	{}
};
344
/* Names for the CCACHE client's sub-client field in VM fault status. */
static const struct nouveau_enum vm_ccache_subclients[] = {
	{ 0x00000000, "CB", NULL },
	{ 0x00000001, "TIC", NULL },
	{ 0x00000002, "TSC", NULL },
	{}
};
351
/* Names for the PROP client's sub-client field in VM fault status. */
static const struct nouveau_enum vm_prop_subclients[] = {
	{ 0x00000000, "RT0", NULL },
	{ 0x00000001, "RT1", NULL },
	{ 0x00000002, "RT2", NULL },
	{ 0x00000003, "RT3", NULL },
	{ 0x00000004, "RT4", NULL },
	{ 0x00000005, "RT5", NULL },
	{ 0x00000006, "RT6", NULL },
	{ 0x00000007, "RT7", NULL },
	{ 0x00000008, "ZETA", NULL },
	{ 0x00000009, "LOCAL", NULL },
	{ 0x0000000a, "GLOBAL", NULL },
	{ 0x0000000b, "STACK", NULL },
	{ 0x0000000c, "DST2D", NULL },
	{}
};
368
/* Names for the PFIFO engine's sub-client field in VM fault status. */
static const struct nouveau_enum vm_pfifo_subclients[] = {
	{ 0x00000000, "PUSHBUF", NULL },
	{ 0x00000001, "SEMAPHORE", NULL },
	{}
};
374
/* Names for the BAR engine's sub-client field in VM fault status. */
static const struct nouveau_enum vm_bar_subclients[] = {
	{ 0x00000000, "FB", NULL },
	{ 0x00000001, "IN", NULL },
	{}
};
380
/* VM fault client IDs; entries with a data pointer have a further
 * sub-client table used to decode the st3 status nibble. */
static const struct nouveau_enum vm_client[] = {
	{ 0x00000000, "STRMOUT", NULL },
	{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
	{ 0x00000004, "PFIFO_WRITE", NULL },
	{ 0x00000005, "CCACHE", vm_ccache_subclients },
	{ 0x00000006, "PPPP", NULL },
	{ 0x00000007, "CLIPID", NULL },
	{ 0x00000008, "PFIFO_READ", NULL },
	{ 0x00000009, "VFETCH", NULL },
	{ 0x0000000a, "TEXTURE", NULL },
	{ 0x0000000b, "PROP", vm_prop_subclients },
	{ 0x0000000c, "PVP", NULL },
	{ 0x0000000d, "PBSP", NULL },
	{ 0x0000000e, "PCRYPT", NULL },
	{ 0x0000000f, "PCOUNTER", NULL },
	{ 0x00000011, "PDAEMON", NULL },
	{}
};
399
/* VM fault engine IDs; entries with a data pointer have a further
 * sub-client table used to decode the st3 status nibble. */
static const struct nouveau_enum vm_engine[] = {
	{ 0x00000000, "PGRAPH", NULL },
	{ 0x00000001, "PVP", NULL },
	{ 0x00000004, "PEEPHOLE", NULL },
	{ 0x00000005, "PFIFO", vm_pfifo_subclients },
	{ 0x00000006, "BAR", vm_bar_subclients },
	{ 0x00000008, "PPPP", NULL },
	{ 0x00000009, "PBSP", NULL },
	{ 0x0000000a, "PCRYPT", NULL },
	{ 0x0000000b, "PCOUNTER", NULL },
	{ 0x0000000c, "SEMAPHORE_BG", NULL },
	{ 0x0000000d, "PCOPY", NULL },
	{ 0x0000000e, "PDAEMON", NULL },
	{}
};
415
/* Human-readable reasons for a VM fault (st1 status field). */
static const struct nouveau_enum vm_fault[] = {
	{ 0x00000000, "PT_NOT_PRESENT", NULL },
	{ 0x00000001, "PT_TOO_SHORT", NULL },
	{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
	{ 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
	{ 0x00000004, "PAGE_READ_ONLY", NULL },
	{ 0x00000006, "NULL_DMAOBJ", NULL },
	{ 0x00000007, "WRONG_MEMTYPE", NULL },
	{ 0x0000000b, "VRAM_LIMIT", NULL },
	{ 0x0000000f, "DMAOBJ_LIMIT", NULL },
	{}
};
428
/* Read and acknowledge a pending VM trap from the 0x100c90/0x100c94
 * register pair, and (optionally) decode it into a human-readable
 * error message using the vm_engine/vm_client/vm_fault tables above.
 *
 * @pfb:     fb subdev the trap was reported on
 * @display: non-zero to log the decoded trap, zero to just ack it
 */
void
nv50_fb_trap(struct nouveau_fb *pfb, int display)
{
	struct nouveau_device *device = nv_device(pfb);
	struct nv50_fb_priv *priv = (void *)pfb;
	const struct nouveau_enum *en, *cl;
	u32 trap[6], idx, chan;
	u8 st0, st1, st2, st3;
	int i;

	/* bit 31 of 0x100c90 flags a pending trap; low 24 bits index it */
	idx = nv_rd32(priv, 0x100c90);
	if (!(idx & 0x80000000))
		return;
	idx &= 0x00ffffff;

	/* the trap record is read as six words, selected via bits 24:26 */
	for (i = 0; i < 6; i++) {
		nv_wr32(priv, 0x100c90, idx | i << 24);
		trap[i] = nv_rd32(priv, 0x100c94);
	}
	/* writing bit 31 back acknowledges the trap */
	nv_wr32(priv, 0x100c90, idx | 0x80000000);

	if (!display)
		return;

	/* decode status bits into something more useful; older chipsets
	 * (and the 0xaa/0xac IGPs) pack them as nibbles, newer as bytes */
	if (device->chipset < 0xa3 ||
	    device->chipset == 0xaa || device->chipset == 0xac) {
		st0 = (trap[0] & 0x0000000f) >> 0;
		st1 = (trap[0] & 0x000000f0) >> 4;
		st2 = (trap[0] & 0x00000f00) >> 8;
		st3 = (trap[0] & 0x0000f000) >> 12;
	} else {
		st0 = (trap[0] & 0x000000ff) >> 0;
		st1 = (trap[0] & 0x0000ff00) >> 8;
		st2 = (trap[0] & 0x00ff0000) >> 16;
		st3 = (trap[0] & 0xff000000) >> 24;
	}
	chan = (trap[2] << 16) | trap[1];

	/* message continued by the bare printk()s below: engine/client/
	 * subclient, then the fault reason */
	nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x ",
		 (trap[5] & 0x00000100) ? "read" : "write",
		 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan);

	en = nouveau_enum_find(vm_engine, st0);
	if (en)
		printk("%s/", en->name);
	else
		printk("%02x/", st0);

	cl = nouveau_enum_find(vm_client, st2);
	if (cl)
		printk("%s/", cl->name);
	else
		printk("%02x/", st2);

	/* st3 is decoded against the matched client's (or failing that the
	 * engine's) sub-client table, when one exists */
	if (cl && cl->data)      cl = nouveau_enum_find(cl->data, st3);
	else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
	else                     cl = NULL;
	if (cl)
		printk("%s", cl->name);
	else
		printk("%02x", st3);

	printk(" reason: ");
	en = nouveau_enum_find(vm_fault, st1);
	if (en)
		printk("%s\n", en->name);
	else
		printk("0x%08x\n", st1);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
new file mode 100644
index 000000000000..9f59f2bf0079
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -0,0 +1,245 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/fb.h>
26#include <subdev/bios.h>
27
/* Private state for the nvc0 FB subdev: common fb base plus the scratch
 * page pointed at by register 0x100c10 (see nvc0_fb_init). */
struct nvc0_fb_priv {
	struct nouveau_fb base;
	struct page *r100c10_page;	/* scratch page backing 0x100c10 */
	dma_addr_t r100c10;		/* its bus address */
};
33
/* Per-memtype capability table, indexed by the memory-type byte of the
 * tile flags (see nvc0_fb_memtype_valid):
 * 0 = unsupported
 * 1 = non-compressed
 * 3 = compressed
 */
static const u8 types[256] = {
	1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
	0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
	3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
	3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
	3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
	3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
};
56
57static bool
58nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
59{
60 u8 memtype = (tile_flags & 0x0000ff00) >> 8;
61 return likely((types[memtype] == 1));
62}
63
/* Allocate VRAM for a buffer, possibly as multiple discontiguous regions.
 *
 * @size:    byte size requested
 * @align:   byte alignment for each region
 * @ncmin:   minimum contiguous chunk size in bytes; 0 means the whole
 *           allocation must be one contiguous region
 * @memtype: low byte is the memory type, bit 11 requests allocation from
 *           the top of VRAM (used to keep some buffers out of the way)
 * @pmem:    receives the new nouveau_mem on success (caller owns it)
 */
static int
nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
		 u32 memtype, struct nouveau_mem **pmem)
{
	struct nouveau_mm *mm = &pfb->vram;
	struct nouveau_mm_node *r;
	struct nouveau_mem *mem;
	int type = (memtype & 0x0ff);
	int back = (memtype & 0x800);
	int ret;

	/* the mm works in 4KiB pages */
	size >>= 12;
	align >>= 12;
	ncmin >>= 12;
	if (!ncmin)
		ncmin = size;	/* 0 == fully contiguous */

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	INIT_LIST_HEAD(&mem->regions);
	mem->memtype = type;
	mem->size = size;

	mutex_lock(&mm->mutex);
	do {
		if (back)
			ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
		else
			ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
		if (ret) {
			mutex_unlock(&mm->mutex);
			/* releases any regions gathered so far + mem itself */
			pfb->ram.put(pfb, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		size -= r->length;
	} while (size);
	mutex_unlock(&mm->mutex);

	/* the reported offset is that of the first region */
	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
	mem->offset = (u64)r->offset << 12;
	*pmem = mem;
	return 0;
}
111
/* Bring up the nvc0 FB subdev: common fb init, then point 0x100c10 at the
 * scratch page allocated by the ctor. */
static int
nvc0_fb_init(struct nouveau_object *object)
{
	struct nvc0_fb_priv *priv = (void *)object;
	int ret;

	ret = nouveau_fb_init(&priv->base);
	if (ret)
		return ret;

	/* register takes the bus address shifted down by 8 bits */
	nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
	return 0;
}
125
/* Tear down the nvc0 FB subdev: release the 0x100c10 scratch page (the
 * ctor may have failed before allocating it) and the common fb state. */
static void
nvc0_fb_dtor(struct nouveau_object *object)
{
	struct nouveau_device *device = nv_device(object);
	struct nvc0_fb_priv *priv = (void *)object;

	if (priv->r100c10_page) {
		pci_unmap_page(device->pdev, priv->r100c10, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(priv->r100c10_page);
	}

	nouveau_fb_destroy(&priv->base);
}
140
/* Probe the amount of VRAM behind each memory partition and initialise
 * the VRAM allocator accordingly.
 *
 * When partitions have differing amounts attached, the address space is
 * non-contiguous: the lowest common amount is addressable from 0, and the
 * remainder appears again above the 8GiB mark — two mm ranges are created
 * to reflect that.
 */
static int
nvc0_vram_detect(struct nvc0_fb_priv *priv)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct nouveau_fb *pfb = &priv->base;
	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
	u32 parts = nv_rd32(priv, 0x022438);
	u32 pmask = nv_rd32(priv, 0x022554);	/* set bit == disabled part */
	u32 bsize = nv_rd32(priv, 0x10f20c);	/* MiB per partition (default) */
	u32 offset, length;
	bool uniform = true;
	int ret, part;

	nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
	nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);

	priv->base.ram.type = nouveau_fb_bios_memtype(bios);
	priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;

	/* read amount of vram attached to each memory controller */
	for (part = 0; part < parts; part++) {
		if (!(pmask & (1 << part))) {
			u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
			if (psize != bsize) {
				/* track the smallest per-partition amount */
				if (psize < bsize)
					bsize = psize;
				uniform = false;
			}

			nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
			priv->base.ram.size += (u64)psize << 20;
		}
	}

	/* if all controllers have the same amount attached, there's no holes */
	if (uniform) {
		offset = rsvd_head;
		length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
		return nouveau_mm_init(&pfb->vram, offset, length, 1);
	}

	/* otherwise, address lowest common amount from 0GiB */
	/* bsize is in MiB; << 8 converts to 4KiB pages */
	ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
	if (ret)
		return ret;

	/* and the rest starting from (8GiB + common_size) */
	offset = (0x0200000000ULL >> 12) + (bsize << 8);
	length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;

	ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
	if (ret) {
		nouveau_mm_fini(&pfb->vram);
		return ret;
	}

	return 0;
}
200
/* Construct the nvc0 FB subdev: create the common fb object, hook up the
 * nvc0 vram/memtype ops, detect VRAM, and allocate + map the scratch page
 * written to 0x100c10 at init time.
 *
 * On any failure after *pobject is set, the core calls nvc0_fb_dtor to
 * clean up whatever was created (including the scratch page).
 */
static int
nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nvc0_fb_priv *priv;
	int ret;

	ret = nouveau_fb_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.memtype_valid = nvc0_fb_memtype_valid;
	priv->base.ram.get = nvc0_fb_vram_new;
	priv->base.ram.put = nv50_fb_vram_del;	/* shared with nv50 */

	ret = nvc0_vram_detect(priv);
	if (ret)
		return ret;

	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!priv->r100c10_page)
		return -ENOMEM;

	priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(device->pdev, priv->r100c10))
		return -EFAULT;

	return nouveau_fb_created(&priv->base);
}
234
235
/* Object class binding the nvc0 FB subdev implementation into the core. */
struct nouveau_oclass
nvc0_fb_oclass = {
	.handle = NV_SUBDEV(FB, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_fb_ctor,
		.dtor = nvc0_fb_dtor,
		.init = nvc0_fb_init,
		.fini = _nouveau_fb_fini,	/* no nvc0-specific fini needed */
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
new file mode 100644
index 000000000000..abb135f74953
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/gpio.h>
26#include <subdev/bios.h>
27#include <subdev/bios/gpio.h>
28
29static int
30nouveau_gpio_drive(struct nouveau_gpio *gpio,
31 int idx, int line, int dir, int out)
32{
33 return gpio->drive ? gpio->drive(gpio, line, dir, out) : -ENODEV;
34}
35
36static int
37nouveau_gpio_sense(struct nouveau_gpio *gpio, int idx, int line)
38{
39 return gpio->sense ? gpio->sense(gpio, line) : -ENODEV;
40}
41
/* Look up a GPIO function in the VBIOS DCB GPIO table, by tag and/or
 * physical line (0xff = wildcard for either, but not both).
 *
 * Returns 0 and fills *func on a match, -EINVAL otherwise.  A hardcoded
 * quirk provides the TVDAC0 entry for the Apple iMac G4 (NV18), whose
 * VBIOS lacks it.
 */
static int
nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
		  struct dcb_gpio_func *func)
{
	if (line == 0xff && tag == 0xff)
		return -EINVAL;

	if (!dcb_gpio_parse(nouveau_bios(gpio), idx, tag, line, func))
		return 0;

	/* Apple iMac G4 NV18 */
	if (nv_device_match(nv_object(gpio), 0x0189, 0x10de, 0x0010)) {
		if (tag == DCB_GPIO_TVDAC0) {
			*func = (struct dcb_gpio_func) {
				.func = DCB_GPIO_TVDAC0,
				.line = 4,
				.log[0] = 0,
				.log[1] = 1,
			};
			return 0;
		}
	}

	return -EINVAL;
}
67
68static int
69nouveau_gpio_set(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, int state)
70{
71 struct dcb_gpio_func func;
72 int ret;
73
74 ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
75 if (ret == 0) {
76 int dir = !!(func.log[state] & 0x02);
77 int out = !!(func.log[state] & 0x01);
78 ret = nouveau_gpio_drive(gpio, idx, func.line, dir, out);
79 }
80
81 return ret;
82}
83
84static int
85nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
86{
87 struct dcb_gpio_func func;
88 int ret;
89
90 ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
91 if (ret == 0) {
92 ret = nouveau_gpio_sense(gpio, idx, func.line);
93 if (ret >= 0)
94 ret = (ret == (func.log[1] & 1));
95 }
96
97 return ret;
98}
99
100static int
101nouveau_gpio_irq(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, bool on)
102{
103 struct dcb_gpio_func func;
104 int ret;
105
106 ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
107 if (ret == 0) {
108 if (idx == 0 && gpio->irq_enable)
109 gpio->irq_enable(gpio, func.line, on);
110 else
111 ret = -ENODEV;
112 }
113
114 return ret;
115}
116
/* One registered GPIO change handler.  Linked on nouveau_gpio::isr
 * (protected by nouveau_gpio::lock); the actual handler runs from a
 * workqueue (see nouveau_gpio_isr_bh). */
struct gpio_isr {
	struct nouveau_gpio *gpio;
	struct list_head head;		/* entry in gpio->isr */
	struct work_struct work;	/* bottom half */
	int idx;			/* provider index this entry is for */
	struct dcb_gpio_func func;	/* resolved DCB GPIO function */
	void (*handler)(void *, int);	/* user callback(data, state) */
	void *data;
	bool inhibit;			/* work already queued; don't requeue */
};
127
/* Workqueue bottom half for a GPIO change interrupt: sample the line's
 * logical state, invoke the user handler, then clear the inhibit flag so
 * the next interrupt can queue us again. */
static void
nouveau_gpio_isr_bh(struct work_struct *work)
{
	struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
	struct nouveau_gpio *gpio = isr->gpio;
	unsigned long flags;
	int state;

	state = nouveau_gpio_get(gpio, isr->idx, isr->func.func,
				 isr->func.line);
	/* only call the handler when the state could actually be read */
	if (state >= 0)
		isr->handler(isr->data, state);

	spin_lock_irqsave(&gpio->lock, flags);
	isr->inhibit = false;
	spin_unlock_irqrestore(&gpio->lock, flags);
}
145
/* Called from the chipset interrupt handler with a bitmask of GPIO lines
 * that changed: queue the bottom half for every matching registered
 * handler, unless one is already pending (inhibit).  Runs in hard-irq
 * context, hence plain spin_lock. */
static void
nouveau_gpio_isr_run(struct nouveau_gpio *gpio, int idx, u32 line_mask)
{
	struct gpio_isr *isr;

	/* interrupts are only supported on the primary provider */
	if (idx != 0)
		return;

	spin_lock(&gpio->lock);
	list_for_each_entry(isr, &gpio->isr, head) {
		if (line_mask & (1 << isr->func.line)) {
			if (isr->inhibit)
				continue;
			isr->inhibit = true;
			schedule_work(&isr->work);
		}
	}
	spin_unlock(&gpio->lock);
}
165
/* Register a handler to be called (from process context) whenever the
 * given GPIO function changes state.
 *
 * @handler: callback invoked as handler(data, logical_state)
 * Returns 0 on success, -ENOMEM, or a lookup error from gpio_find.
 */
static int
nouveau_gpio_isr_add(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
		     void (*handler)(void *, int), void *data)
{
	struct gpio_isr *isr;
	unsigned long flags;
	int ret;

	isr = kzalloc(sizeof(*isr), GFP_KERNEL);
	if (!isr)
		return -ENOMEM;

	ret = nouveau_gpio_find(gpio, idx, tag, line, &isr->func);
	if (ret) {
		kfree(isr);
		return ret;
	}

	INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
	isr->gpio = gpio;
	isr->handler = handler;
	isr->data = data;
	isr->idx = idx;

	spin_lock_irqsave(&gpio->lock, flags);
	list_add(&isr->head, &gpio->isr);
	spin_unlock_irqrestore(&gpio->lock, flags);
	return 0;
}
195
/* Unregister handler(s) previously added with nouveau_gpio_isr_add,
 * matched on (function, idx, handler, data).  Entries are moved off the
 * live list under the lock first, then any in-flight bottom half is
 * flushed before freeing, so the handler never runs on freed memory. */
static void
nouveau_gpio_isr_del(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
		     void (*handler)(void *, int), void *data)
{
	struct gpio_isr *isr, *tmp;
	struct dcb_gpio_func func;
	unsigned long flags;
	LIST_HEAD(tofree);
	int ret;

	ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
	if (ret == 0) {
		spin_lock_irqsave(&gpio->lock, flags);
		list_for_each_entry_safe(isr, tmp, &gpio->isr, head) {
			if (memcmp(&isr->func, &func, sizeof(func)) ||
			    isr->idx != idx ||
			    isr->handler != handler || isr->data != data)
				continue;
			list_move_tail(&isr->head, &tofree);
		}
		spin_unlock_irqrestore(&gpio->lock, flags);

		/* outside the lock: wait for pending work, then free */
		list_for_each_entry_safe(isr, tmp, &tofree, head) {
			flush_work_sync(&isr->work);
			kfree(isr);
		}
	}
}
224
/* Common constructor for all GPIO subdev implementations: create the
 * subdev object of the given length and install the shared method table;
 * chipset code then fills in drive/sense/irq_enable/reset as supported.
 *
 * @length:  full size of the implementation's private struct
 * @pobject: receives the new object (set even on some failure paths,
 *           per the usual nouveau ctor convention)
 */
int
nouveau_gpio_create_(struct nouveau_object *parent,
		     struct nouveau_object *engine,
		     struct nouveau_oclass *oclass, int length, void **pobject)
{
	struct nouveau_gpio *gpio;
	int ret;

	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "GPIO", "gpio",
				     length, pobject);
	gpio = *pobject;
	if (ret)
		return ret;

	gpio->find = nouveau_gpio_find;
	gpio->set = nouveau_gpio_set;
	gpio->get = nouveau_gpio_get;
	gpio->irq = nouveau_gpio_irq;
	gpio->isr_run = nouveau_gpio_isr_run;
	gpio->isr_add = nouveau_gpio_isr_add;
	gpio->isr_del = nouveau_gpio_isr_del;
	INIT_LIST_HEAD(&gpio->isr);
	spin_lock_init(&gpio->lock);
	return 0;
}
250
/* Machines whose GPIOs must be reset to VBIOS defaults at init time.
 * NOTE(review): the ident string says "Macbook 10,1" while the DMI match
 * is against "MacBookPro10,1" — the ident is informational only, but the
 * label looks inconsistent with the product matched. */
static struct dmi_system_id gpio_reset_ids[] = {
	{
		.ident = "Apple Macbook 10,1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
		}
	},
	{ }
};
261
262int
263nouveau_gpio_init(struct nouveau_gpio *gpio)
264{
265 int ret = nouveau_subdev_init(&gpio->base);
266 if (ret == 0 && gpio->reset) {
267 if (dmi_check_system(gpio_reset_ids))
268 gpio->reset(gpio);
269 }
270 return ret;
271}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
new file mode 100644
index 000000000000..168d16a9a8e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
@@ -0,0 +1,169 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/gpio.h>
28
/* Private state for the nv10 GPIO subdev (no extra state beyond base). */
struct nv10_gpio_priv {
	struct nouveau_gpio base;
};
32
33static int
34nv10_gpio_sense(struct nouveau_gpio *gpio, int line)
35{
36 if (line < 2) {
37 line = line * 16;
38 line = nv_rd32(gpio, 0x600818) >> line;
39 return !!(line & 0x0100);
40 } else
41 if (line < 10) {
42 line = (line - 2) * 4;
43 line = nv_rd32(gpio, 0x60081c) >> line;
44 return !!(line & 0x04);
45 } else
46 if (line < 14) {
47 line = (line - 10) * 4;
48 line = nv_rd32(gpio, 0x600850) >> line;
49 return !!(line & 0x04);
50 }
51
52 return -EINVAL;
53}
54
/* Drive a GPIO line: program its direction and output bits.  Register
 * layout matches nv10_gpio_sense: lines 0-1 in 0x600818 (dir at bit 4,
 * out at bit 0 of each 16-bit slot), lines 2-13 split across 0x60081c
 * and 0x600850 (dir at bit 1, out at bit 0 of each 4-bit slot). */
static int
nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
{
	u32 reg, mask, data;

	if (line < 2) {
		line = line * 16;
		reg  = 0x600818;
		mask = 0x00000011;
		data = (dir << 4) | out;
	} else
	if (line < 10) {
		line = (line - 2) * 4;
		reg  = 0x60081c;
		mask = 0x00000003;
		data = (dir << 1) | out;
	} else
	if (line < 14) {
		line = (line - 10) * 4;
		reg  = 0x600850;
		mask = 0x00000003;
		data = (dir << 1) | out;
	} else {
		return -EINVAL;
	}

	nv_mask(gpio, reg, mask << line, data << line);
	return 0;
}
84
/* Enable/disable the change interrupt for one GPIO line.  The mask
 * covers both edge bits (hi at bit 16+line, lo at bit line); pending
 * status is acked in 0x001104 before (un)masking in 0x001144. */
static void
nv10_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
{
	u32 mask = 0x00010001 << line;

	nv_wr32(gpio, 0x001104, mask);
	nv_mask(gpio, 0x001144, mask, on ? mask : 0);
}
93
/* GPIO interrupt handler: collapse the hi/lo edge halves of 0x001104
 * into a single line mask, dispatch to registered handlers, then ack. */
static void
nv10_gpio_intr(struct nouveau_subdev *subdev)
{
	struct nv10_gpio_priv *priv = (void *)subdev;
	u32 intr = nv_rd32(priv, 0x001104);
	u32 hi = (intr & 0x0000ffff) >> 0;
	u32 lo = (intr & 0xffff0000) >> 16;

	/* handlers resample the line, so either edge is reported the same */
	priv->base.isr_run(&priv->base, 0, hi | lo);

	nv_wr32(priv, 0x001104, intr);
}
106
/* Construct the nv10 GPIO subdev: create the common gpio object and hook
 * up the nv10-specific drive/sense/irq implementations. */
static int
nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv10_gpio_priv *priv;
	int ret;

	ret = nouveau_gpio_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.drive = nv10_gpio_drive;
	priv->base.sense = nv10_gpio_sense;
	priv->base.irq_enable = nv10_gpio_irq_enable;
	nv_subdev(priv)->intr = nv10_gpio_intr;
	return 0;
}
126
/* Tear down the nv10 GPIO subdev (no nv10-specific state to release). */
static void
nv10_gpio_dtor(struct nouveau_object *object)
{
	struct nv10_gpio_priv *priv = (void *)object;
	nouveau_gpio_destroy(&priv->base);
}
133
/* Bring up the nv10 GPIO subdev: common init, then mask and ack all GPIO
 * interrupt sources so we start from a clean state. */
static int
nv10_gpio_init(struct nouveau_object *object)
{
	struct nv10_gpio_priv *priv = (void *)object;
	int ret;

	ret = nouveau_gpio_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x001140, 0x00000000);	/* disable */
	nv_wr32(priv, 0x001100, 0xffffffff);	/* ack */
	nv_wr32(priv, 0x001144, 0x00000000);	/* disable */
	nv_wr32(priv, 0x001104, 0xffffffff);	/* ack */
	return 0;
}
150
/* Quiesce the nv10 GPIO subdev: mask its interrupt sources, then run the
 * common fini path. */
static int
nv10_gpio_fini(struct nouveau_object *object, bool suspend)
{
	struct nv10_gpio_priv *priv = (void *)object;
	nv_wr32(priv, 0x001140, 0x00000000);
	nv_wr32(priv, 0x001144, 0x00000000);
	return nouveau_gpio_fini(&priv->base, suspend);
}
159
/* Object class binding the nv10 GPIO subdev implementation into the core. */
struct nouveau_oclass
nv10_gpio_oclass = {
	.handle = NV_SUBDEV(GPIO, 0x10),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv10_gpio_ctor,
		.dtor = nv10_gpio_dtor,
		.init = nv10_gpio_init,
		.fini = nv10_gpio_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
new file mode 100644
index 000000000000..f3502c961cd9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -0,0 +1,194 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/gpio.h>
26
/* Private state for the nv50 GPIO subdev (no extra state beyond base). */
struct nv50_gpio_priv {
	struct nouveau_gpio base;
};
30
/* Restore every GPIO listed in the VBIOS DCB GPIO table to its default
 * state, and program the per-line unknown config bits in 0xe100/0xe28c. */
static void
nv50_gpio_reset(struct nouveau_gpio *gpio)
{
	struct nouveau_bios *bios = nouveau_bios(gpio);
	struct nv50_gpio_priv *priv = (void *)gpio;
	u16 entry;
	u8 ver;
	int ent = -1;

	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
		static const u32 regs[] = { 0xe100, 0xe28c };
		u32 data = nv_ro32(bios, entry);
		u8 line =   (data & 0x0000001f);	/* physical line */
		u8 func =   (data & 0x0000ff00) >> 8;	/* DCB function tag */
		u8 defs = !!(data & 0x01000000);	/* default logic state */
		u8 unk0 = !!(data & 0x02000000);
		u8 unk1 = !!(data & 0x04000000);
		u32 val = (unk1 << 16) | unk0;
		/* lines 0-15 in 0xe100, 16-31 in 0xe28c */
		u32 reg = regs[line >> 4]; line &= 0x0f;

		/* 0xff marks an unused table entry */
		if (func == 0xff)
			continue;

		gpio->set(gpio, 0, func, line, defs);

		nv_mask(priv, reg, 0x00010001 << line, val << line);
	}
}
59
60static int
61nv50_gpio_location(int line, u32 *reg, u32 *shift)
62{
63 const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
64
65 if (line >= 32)
66 return -EINVAL;
67
68 *reg = nv50_gpio_reg[line >> 3];
69 *shift = (line & 7) << 2;
70 return 0;
71}
72
/* Drive a GPIO line: each line's 4-bit field holds output level (bit 0)
 * and an active-low direction bit (bit 1 — hence dir ^ 1). */
static int
nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
{
	u32 reg, shift;

	if (nv50_gpio_location(line, &reg, &shift))
		return -EINVAL;

	nv_mask(gpio, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
	return 0;
}
84
/* Read the raw input state of a GPIO line (bit 2 of its 4-bit field). */
static int
nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
{
	u32 reg, shift;

	if (nv50_gpio_location(line, &reg, &shift))
		return -EINVAL;

	return !!(nv_rd32(gpio, reg) & (4 << shift));
}
95
/* Enable/disable the change interrupt for one GPIO line.  Lines 0-15 use
 * the 0xe050/0xe054 pair, 16-31 the 0xe070/0xe074 pair (enable at +0,
 * status/ack at +4); the mask covers both hi and lo edge bits. */
void
nv50_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
{
	u32 reg = line < 16 ? 0xe050 : 0xe070;
	u32 mask = 0x00010001 << (line & 0xf);

	nv_wr32(gpio, reg + 4, mask);	/* ack any pending event first */
	nv_mask(gpio, reg + 0, mask, on ? mask : 0);
}
105
/* GPIO interrupt handler: gather enabled+pending events from both banks
 * (the second bank only exists on nv90 and later), merge the hi/lo edge
 * halves into one line mask, dispatch, then ack what was handled. */
void
nv50_gpio_intr(struct nouveau_subdev *subdev)
{
	struct nv50_gpio_priv *priv = (void *)subdev;
	u32 intr0, intr1 = 0;
	u32 hi, lo;

	intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
	if (nv_device(priv)->chipset >= 0x90)
		intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);

	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
	/* handlers resample the line, so either edge is reported the same */
	priv->base.isr_run(&priv->base, 0, hi | lo);

	nv_wr32(priv, 0xe054, intr0);
	if (nv_device(priv)->chipset >= 0x90)
		nv_wr32(priv, 0xe074, intr1);
}
125
/* Construct the nv50 GPIO subdev: create the common gpio object and hook
 * up the nv50-specific reset/drive/sense/irq implementations. */
static int
nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv50_gpio_priv *priv;
	int ret;

	ret = nouveau_gpio_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.reset = nv50_gpio_reset;
	priv->base.drive = nv50_gpio_drive;
	priv->base.sense = nv50_gpio_sense;
	priv->base.irq_enable = nv50_gpio_irq_enable;
	nv_subdev(priv)->intr = nv50_gpio_intr;
	return 0;
}
146
/* Tear down the nv50 GPIO subdev (no nv50-specific state to release). */
void
nv50_gpio_dtor(struct nouveau_object *object)
{
	struct nv50_gpio_priv *priv = (void *)object;
	nouveau_gpio_destroy(&priv->base);
}
153
/* Bring up the nv50 GPIO subdev: common init, then disable and ack both
 * interrupt banks (the second exists only on nv90 and later). */
int
nv50_gpio_init(struct nouveau_object *object)
{
	struct nv50_gpio_priv *priv = (void *)object;
	int ret;

	ret = nouveau_gpio_init(&priv->base);
	if (ret)
		return ret;

	/* disable, and ack any pending gpio interrupts */
	nv_wr32(priv, 0xe050, 0x00000000);
	nv_wr32(priv, 0xe054, 0xffffffff);
	if (nv_device(priv)->chipset >= 0x90) {
		nv_wr32(priv, 0xe070, 0x00000000);
		nv_wr32(priv, 0xe074, 0xffffffff);
	}

	return 0;
}
174
/* Quiesce the nv50 GPIO subdev: mask both interrupt banks, then run the
 * common fini path. */
int
nv50_gpio_fini(struct nouveau_object *object, bool suspend)
{
	struct nv50_gpio_priv *priv = (void *)object;
	nv_wr32(priv, 0xe050, 0x00000000);
	if (nv_device(priv)->chipset >= 0x90)
		nv_wr32(priv, 0xe070, 0x00000000);
	return nouveau_gpio_fini(&priv->base, suspend);
}
184
/* Object class binding the nv50 GPIO subdev implementation into the core. */
struct nouveau_oclass
nv50_gpio_oclass = {
	.handle = NV_SUBDEV(GPIO, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_gpio_ctor,
		.dtor = nv50_gpio_dtor,
		.init = nv50_gpio_init,
		.fini = nv50_gpio_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
new file mode 100644
index 000000000000..8d18fcad26e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/gpio.h>
26
/* Private instance data for the nvd0 GPIO subdev.
 * base must be the first member: code casts between this and
 * struct nouveau_gpio / nouveau_object pointers. */
struct nvd0_gpio_priv {
	struct nouveau_gpio base;
};
30
/* Reset all GPIO functions to the defaults described by the VBIOS DCB
 * GPIO table.  Entries with function 0xff are unpopulated and skipped.
 */
static void
nvd0_gpio_reset(struct nouveau_gpio *gpio)
{
	struct nouveau_bios *bios = nouveau_bios(gpio);
	struct nvd0_gpio_priv *priv = (void *)gpio;
	u16 entry;
	u8 ver;
	int ent = -1;

	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
		u32 data = nv_ro32(bios, entry);
		u8 line = (data & 0x0000003f);
		u8 defs = !!(data & 0x00000080);	/* default output state */
		u8 func = (data & 0x0000ff00) >> 8;
		u8 unk0 = (data & 0x00ff0000) >> 16;
		u8 unk1 = (data & 0x1f000000) >> 24;

		if (func == 0xff)	/* entry not present */
			continue;

		/* program the line's function and default state */
		gpio->set(gpio, 0, func, line, defs);

		/* NOTE(review): exact meaning of unk0/unk1 isn't known;
		 * unk1 == 0 skips the second register bank, otherwise
		 * unk1 - 1 indexes it */
		nv_mask(priv, 0x00d610 + (line * 4), 0xff, unk0);
		if (unk1--)
			nv_mask(priv, 0x00d740 + (unk1 * 4), 0xff, line);
	}
}
58
59static int
60nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
61{
62 u32 data = ((dir ^ 1) << 13) | (out << 12);
63 nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data);
64 nv_mask(gpio, 0x00d604, 0x00000001, 0x00000001); /* update? */
65 return 0;
66}
67
68static int
69nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
70{
71 return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
72}
73
/* Constructor for the nvd0 GPIO subdev.  Installs the nvd0-specific
 * reset/drive/sense methods; irq handling is shared with nv50.
 */
static int
nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nvd0_gpio_priv *priv;
	int ret;

	ret = nouveau_gpio_create(parent, engine, oclass, &priv);
	/* published before the error check -- presumably so the core can
	 * tear down a partially-constructed object; same pattern as the
	 * other nouveau ctors in this series */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.reset = nvd0_gpio_reset;
	priv->base.drive = nvd0_gpio_drive;
	priv->base.sense = nvd0_gpio_sense;
	priv->base.irq_enable = nv50_gpio_irq_enable;
	nv_subdev(priv)->intr = nv50_gpio_intr;
	return 0;
}
94
/* Object class for the nvd0 GPIO subdev; dtor/init/fini are reused
 * from the nv50 implementation. */
struct nouveau_oclass
nvd0_gpio_oclass = {
	.handle = NV_SUBDEV(GPIO, 0xd0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvd0_gpio_ctor,
		.dtor = nv50_gpio_dtor,
		.init = nv50_gpio_init,
		.fini = nv50_gpio_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
new file mode 100644
index 000000000000..fe1ebf199ba9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -0,0 +1,212 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/i2c.h>
26
27/******************************************************************************
28 * aux channel util functions
29 *****************************************************************************/
30#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
31#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
32
33static void
34auxch_fini(struct nouveau_i2c *aux, int ch)
35{
36 nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
37}
38
/* Acquire aux channel 'ch' for a transaction: wait for any previous
 * request to go idle, then request ownership and wait for the hardware
 * to acknowledge.  Returns 0 on success, -EBUSY on timeout.
 */
static int
auxch_init(struct nouveau_i2c *aux, int ch)
{
	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
	const u32 urep = unksel ? 0x01000000 : 0x02000000;
	u32 ctrl, timeout;

	/* wait up to 1ms for any previous transaction to be done... */
	timeout = 1000;
	do {
		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
		udelay(1);
		if (!timeout--) {
			AUX_ERR("begin idle timeout 0x%08x", ctrl);
			return -EBUSY;
		}
	} while (ctrl & 0x03010000);

	/* set some magic, and wait up to 1ms for it to appear */
	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
	timeout = 1000;
	do {
		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
		udelay(1);
		if (!timeout--) {
			AUX_ERR("magic wait 0x%08x\n", ctrl);
			/* drop the half-acquired channel before failing */
			auxch_fini(aux, ch);
			return -EBUSY;
		}
	} while ((ctrl & 0x03000000) != urep);

	return 0;
}
73
/* Execute one aux transaction on channel 'ch'.  'type' bit 0 selects
 * read (set) vs write (clear); payload is at most 16 bytes (the fifo
 * size).  Retries up to 32 times on failure status.  Returns 0 on
 * success, -EBUSY/-ENXIO/-EREMOTEIO on failure.
 */
static int
auxch_tx(struct nouveau_i2c *aux, int ch, u8 type, u32 addr, u8 *data, u8 size)
{
	u32 ctrl, stat, timeout, retries;
	u32 xbuf[4] = {};
	int ret, i;

	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);

	ret = auxch_init(aux, ch);
	if (ret)
		goto out;

	/* hpd/sink-present bit; no point transacting with nothing attached */
	stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
	if (!(stat & 0x10000000)) {
		AUX_DBG("sink not detected\n");
		ret = -ENXIO;
		goto out;
	}

	/* writes: stage the payload into the tx fifo */
	if (!(type & 1)) {
		memcpy(xbuf, data, size);
		for (i = 0; i < 16; i += 4) {
			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
			nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
		}
	}

	/* request type goes in bits 12+, (size - 1) in the low bits */
	ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
	ctrl &= ~0x0001f0ff;
	ctrl |= type << 12;
	ctrl |= size - 1;
	nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);

	/* retry transaction a number of times on failure... */
	ret = -EREMOTEIO;
	for (retries = 0; retries < 32; retries++) {
		/* reset, and delay a while if this is a retry */
		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
		if (retries)
			udelay(400);

		/* transaction request, wait up to 1ms for it to complete */
		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);

		timeout = 1000;
		do {
			ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
			udelay(1);
			if (!timeout--) {
				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
				goto out;
			}
		} while (ctrl & 0x00010000);

		/* read status, and check if transaction completed ok */
		stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
		if (!(stat & 0x000f0f00)) {
			ret = 0;
			break;
		}

		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
	}

	/* reads: drain the rx fifo.  NOTE(review): the copy happens even
	 * when ret != 0; presumably callers only consume 'data' when
	 * ret == 0 -- verify against callers */
	if (type & 1) {
		for (i = 0; i < 16; i += 4) {
			xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
		}
		memcpy(data, xbuf, size);
	}

out:
	auxch_fini(aux, ch);
	return ret;
}
152
/* Native aux read (request type 9) of up to 16 bytes from 'addr'. */
int
nv_rdaux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
{
	return auxch_tx(auxch->i2c, auxch->drive, 9, addr, data, size);
}
158
/* Native aux write (request type 8) of up to 16 bytes to 'addr'. */
int
nv_wraux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
{
	return auxch_tx(auxch->i2c, auxch->drive, 8, addr, data, size);
}
164
/* i2c_algorithm master_xfer implementation for i2c-over-aux: each
 * message is split into aux transactions of at most 16 bytes, with the
 * MOT (middle-of-transaction) flag set while more data or messages
 * remain.  Returns 'num' on success or a negative error code.
 */
static int
aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	/* the adapter is embedded at the start of the port struct */
	struct nouveau_i2c_port *auxch = (struct nouveau_i2c_port *)adap;
	struct i2c_msg *msg = msgs;
	int ret, mcnt = num;

	while (mcnt--) {
		u8 remaining = msg->len;
		u8 *ptr = msg->buf;

		while (remaining) {
			u8 cnt = (remaining > 16) ? 16 : remaining;
			u8 cmd;

			/* bit 0: read request */
			if (msg->flags & I2C_M_RD)
				cmd = 1;
			else
				cmd = 0;

			if (mcnt || remaining > 16)
				cmd |= 4; /* MOT */

			ret = auxch_tx(auxch->i2c, auxch->drive, cmd,
				       msg->addr, ptr, cnt);
			if (ret < 0)
				return ret;

			ptr += cnt;
			remaining -= cnt;
		}

		msg++;
	}

	return num;
}
202
/* Capabilities advertised for the aux-backed adapter. */
static u32
aux_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
208
/* i2c algorithm backed by DisplayPort aux channel transactions. */
const struct i2c_algorithm nouveau_i2c_aux_algo = {
	.master_xfer = aux_xfer,
	.functionality = aux_func
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
new file mode 100644
index 000000000000..3d2c88310f98
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -0,0 +1,407 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "core/option.h"
26
27#include "subdev/i2c.h"
28#include "subdev/vga.h"
29
30int
31nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
32{
33 u8 val;
34 struct i2c_msg msgs[] = {
35 { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
36 { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
37 };
38
39 int ret = i2c_transfer(&port->adapter, msgs, 2);
40 if (ret != 2)
41 return -EIO;
42
43 return val;
44}
45
46int
47nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
48{
49 struct i2c_msg msgs[] = {
50 { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
51 { .addr = addr, .flags = 0, .len = 1, .buf = &val },
52 };
53
54 int ret = i2c_transfer(&port->adapter, msgs, 2);
55 if (ret != 2)
56 return -EIO;
57
58 return 0;
59}
60
61bool
62nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
63{
64 u8 buf[] = { 0 };
65 struct i2c_msg msgs[] = {
66 {
67 .addr = addr,
68 .flags = 0,
69 .len = 1,
70 .buf = buf,
71 },
72 {
73 .addr = addr,
74 .flags = I2C_M_RD,
75 .len = 1,
76 .buf = buf,
77 }
78 };
79
80 return i2c_transfer(&port->adapter, msgs, 2) == 2;
81}
82
/* Look up the port for a DCB i2c index.  NV_I2C_DEFAULT(0|1) are
 * resolved via byte 4 of a v3.0+ DCB i2c table (low/high nibble), or
 * fall back to port 2.  On nv50+ shared pads (dcb bit 8), the pad is
 * also routed to auxch (type 6) or normal i2c mode.  Returns NULL when
 * no port with the resolved index exists.
 */
static struct nouveau_i2c_port *
nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
{
	struct nouveau_bios *bios = nouveau_bios(i2c);
	struct nouveau_i2c_port *port;

	if (index == NV_I2C_DEFAULT(0) ||
	    index == NV_I2C_DEFAULT(1)) {
		u8 ver, hdr, cnt, len;
		/* NOTE(review): this local shadows the 'i2c' parameter */
		u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
		if (i2c && ver >= 0x30) {
			u8 auxidx = nv_ro08(bios, i2c + 4);
			if (index == NV_I2C_DEFAULT(0))
				index = (auxidx & 0x0f) >> 0;
			else
				index = (auxidx & 0xf0) >> 4;
		} else {
			index = 2;
		}
	}

	list_for_each_entry(port, &i2c->ports, head) {
		if (port->index == index)
			break;
	}

	/* iterator ran off the end: no matching port */
	if (&port->head == &i2c->ports)
		return NULL;

	if (nv_device(i2c)->card_type >= NV_50 && (port->dcb & 0x00000100)) {
		u32 reg = 0x00e500, val;
		if (port->type == 6) {
			reg += port->drive * 0x50;
			val = 0x2002;
		} else {
			reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
			val = 0xe001;
		}

		/* nfi, but neither auxch or i2c work if it's 1 */
		nv_mask(i2c, reg + 0x0c, 0x00000001, 0x00000000);
		/* nfi, but switches auxch vs normal i2c */
		nv_mask(i2c, reg + 0x00, 0x0000f003, val);
	}

	return port;
}
130
131static int
132nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
133 struct i2c_board_info *info,
134 bool (*match)(struct nouveau_i2c_port *,
135 struct i2c_board_info *))
136{
137 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
138 int i;
139
140 if (!port) {
141 nv_debug(i2c, "no bus when probing %s on %d\n", what, index);
142 return -ENODEV;
143 }
144
145 nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
146 for (i = 0; info[i].addr; i++) {
147 if (nv_probe_i2c(port, info[i].addr) &&
148 (!match || match(port, &info[i]))) {
149 nv_info(i2c, "detected %s: %s\n", what, info[i].type);
150 return i;
151 }
152 }
153
154 nv_debug(i2c, "no devices found.\n");
155 return -ENODEV;
156}
157
/* Drive the SCL line for a port ('data' is a struct nouveau_i2c_port *;
 * void * so it can double as an i2c-algo-bit setscl callback), using
 * the access method for the port type.
 */
void
nouveau_i2c_drive_scl(void *data, int state)
{
	struct nouveau_i2c_port *port = data;

	if (port->type == DCB_I2C_NV04_BIT) {
		/* vga crtc register: bit 5 = SCL; bit 0 always set on
		 * write -- presumably an output enable, confirm */
		u8 val = nv_rdvgac(port->i2c, 0, port->drive);
		if (state) val |= 0x20;
		else val &= 0xdf;
		nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
	} else
	if (port->type == DCB_I2C_NV4E_BIT) {
		nv_mask(port->i2c, port->drive, 0x2f, state ? 0x21 : 0x01);
	} else
	if (port->type == DCB_I2C_NVIO_BIT) {
		/* cached line state: bit 0 = SCL, bit 1 = SDA */
		if (state) port->state |= 0x01;
		else port->state &= 0xfe;
		nv_wr32(port->i2c, port->drive, 4 | port->state);
	}
}
178
/* Drive the SDA line for a port; mirror of nouveau_i2c_drive_scl with
 * the SDA bit positions (vga bit 4, cached-state bit 1).
 */
void
nouveau_i2c_drive_sda(void *data, int state)
{
	struct nouveau_i2c_port *port = data;

	if (port->type == DCB_I2C_NV04_BIT) {
		u8 val = nv_rdvgac(port->i2c, 0, port->drive);
		if (state) val |= 0x10;
		else val &= 0xef;
		nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
	} else
	if (port->type == DCB_I2C_NV4E_BIT) {
		nv_mask(port->i2c, port->drive, 0x1f, state ? 0x11 : 0x01);
	} else
	if (port->type == DCB_I2C_NVIO_BIT) {
		if (state) port->state |= 0x02;
		else port->state &= 0xfd;
		nv_wr32(port->i2c, port->drive, 4 | port->state);
	}
}
199
200int
201nouveau_i2c_sense_scl(void *data)
202{
203 struct nouveau_i2c_port *port = data;
204 struct nouveau_device *device = nv_device(port->i2c);
205
206 if (port->type == DCB_I2C_NV04_BIT) {
207 return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x04);
208 } else
209 if (port->type == DCB_I2C_NV4E_BIT) {
210 return !!(nv_rd32(port->i2c, port->sense) & 0x00040000);
211 } else
212 if (port->type == DCB_I2C_NVIO_BIT) {
213 if (device->card_type < NV_D0)
214 return !!(nv_rd32(port->i2c, port->sense) & 0x01);
215 else
216 return !!(nv_rd32(port->i2c, port->sense) & 0x10);
217 }
218
219 return 0;
220}
221
222int
223nouveau_i2c_sense_sda(void *data)
224{
225 struct nouveau_i2c_port *port = data;
226 struct nouveau_device *device = nv_device(port->i2c);
227
228 if (port->type == DCB_I2C_NV04_BIT) {
229 return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x08);
230 } else
231 if (port->type == DCB_I2C_NV4E_BIT) {
232 return !!(nv_rd32(port->i2c, port->sense) & 0x00080000);
233 } else
234 if (port->type == DCB_I2C_NVIO_BIT) {
235 if (device->card_type < NV_D0)
236 return !!(nv_rd32(port->i2c, port->sense) & 0x02);
237 else
238 return !!(nv_rd32(port->i2c, port->sense) & 0x20);
239 }
240
241 return 0;
242}
243
/* nv50-family i2c port register offsets, indexed by the DCB i2c table
 * 'drive' value (see DCB_I2C_NVIO_BIT handling in the ctor below). */
static const u32 nv50_i2c_port[] = {
	0x00e138, 0x00e150, 0x00e168, 0x00e180,
	0x00e254, 0x00e274, 0x00e764, 0x00e780,
	0x00e79c, 0x00e7b8
};
249
250static int
251nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
252 struct nouveau_oclass *oclass, void *data, u32 size,
253 struct nouveau_object **pobject)
254{
255 struct nouveau_device *device = nv_device(parent);
256 struct nouveau_bios *bios = nouveau_bios(parent);
257 struct nouveau_i2c_port *port;
258 struct nouveau_i2c *i2c;
259 struct dcb_i2c_entry info;
260 int ret, i = -1;
261
262 ret = nouveau_subdev_create(parent, engine, oclass, 0,
263 "I2C", "i2c", &i2c);
264 *pobject = nv_object(i2c);
265 if (ret)
266 return ret;
267
268 i2c->find = nouveau_i2c_find;
269 i2c->identify = nouveau_i2c_identify;
270 INIT_LIST_HEAD(&i2c->ports);
271
272 while (!dcb_i2c_parse(bios, ++i, &info)) {
273 if (info.type == DCB_I2C_UNUSED)
274 continue;
275
276 port = kzalloc(sizeof(*port), GFP_KERNEL);
277 if (!port) {
278 nv_error(i2c, "failed port memory alloc at %d\n", i);
279 break;
280 }
281
282 port->type = info.type;
283 switch (port->type) {
284 case DCB_I2C_NV04_BIT:
285 port->drive = info.drive;
286 port->sense = info.sense;
287 break;
288 case DCB_I2C_NV4E_BIT:
289 port->drive = 0x600800 + info.drive;
290 port->sense = port->drive;
291 break;
292 case DCB_I2C_NVIO_BIT:
293 port->drive = info.drive & 0x0f;
294 if (device->card_type < NV_D0) {
295 if (info.drive >= ARRAY_SIZE(nv50_i2c_port))
296 break;
297 port->drive = nv50_i2c_port[port->drive];
298 port->sense = port->drive;
299 } else {
300 port->drive = 0x00d014 + (port->drive * 0x20);
301 port->sense = port->drive;
302 }
303 break;
304 case DCB_I2C_NVIO_AUX:
305 port->drive = info.drive & 0x0f;
306 port->sense = port->drive;
307 port->adapter.algo = &nouveau_i2c_aux_algo;
308 break;
309 default:
310 break;
311 }
312
313 if (!port->adapter.algo && !port->drive) {
314 nv_error(i2c, "I2C%d: type %d index %x/%x unknown\n",
315 i, port->type, port->drive, port->sense);
316 kfree(port);
317 continue;
318 }
319
320 snprintf(port->adapter.name, sizeof(port->adapter.name),
321 "nouveau-%s-%d", device->name, i);
322 port->adapter.owner = THIS_MODULE;
323 port->adapter.dev.parent = &device->pdev->dev;
324 port->i2c = i2c;
325 port->index = i;
326 port->dcb = info.data;
327 i2c_set_adapdata(&port->adapter, i2c);
328
329 if (port->adapter.algo != &nouveau_i2c_aux_algo) {
330 nouveau_i2c_drive_scl(port, 0);
331 nouveau_i2c_drive_sda(port, 1);
332 nouveau_i2c_drive_scl(port, 1);
333
334#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
335 if (nouveau_boolopt(device->cfgopt, "NvI2C", true)) {
336#else
337 if (nouveau_boolopt(device->cfgopt, "NvI2C", false)) {
338#endif
339 port->adapter.algo = &nouveau_i2c_bit_algo;
340 ret = i2c_add_adapter(&port->adapter);
341 } else {
342 port->adapter.algo_data = &port->bit;
343 port->bit.udelay = 10;
344 port->bit.timeout = usecs_to_jiffies(2200);
345 port->bit.data = port;
346 port->bit.setsda = nouveau_i2c_drive_sda;
347 port->bit.setscl = nouveau_i2c_drive_scl;
348 port->bit.getsda = nouveau_i2c_sense_sda;
349 port->bit.getscl = nouveau_i2c_sense_scl;
350 ret = i2c_bit_add_bus(&port->adapter);
351 }
352 } else {
353 port->adapter.algo = &nouveau_i2c_aux_algo;
354 ret = i2c_add_adapter(&port->adapter);
355 }
356
357 if (ret) {
358 nv_error(i2c, "I2C%d: failed register: %d\n", i, ret);
359 kfree(port);
360 continue;
361 }
362
363 list_add_tail(&port->head, &i2c->ports);
364 }
365
366 return 0;
367}
368
369static void
370nouveau_i2c_dtor(struct nouveau_object *object)
371{
372 struct nouveau_i2c *i2c = (void *)object;
373 struct nouveau_i2c_port *port, *temp;
374
375 list_for_each_entry_safe(port, temp, &i2c->ports, head) {
376 i2c_del_adapter(&port->adapter);
377 list_del(&port->head);
378 kfree(port);
379 }
380
381 nouveau_subdev_destroy(&i2c->base);
382}
383
/* Subdev init: nothing i2c-specific to do, defer to common subdev code. */
static int
nouveau_i2c_init(struct nouveau_object *object)
{
	struct nouveau_i2c *i2c = (void *)object;
	return nouveau_subdev_init(&i2c->base);
}
390
/* Subdev fini: nothing i2c-specific to do, defer to common subdev code. */
static int
nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
{
	struct nouveau_i2c *i2c = (void *)object;
	return nouveau_subdev_fini(&i2c->base, suspend);
}
397
/* Object class for the (chipset-independent) i2c subdev. */
struct nouveau_oclass
nouveau_i2c_oclass = {
	.handle = NV_SUBDEV(I2C, 0x00),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nouveau_i2c_ctor,
		.dtor = nouveau_i2c_dtor,
		.init = nouveau_i2c_init,
		.fini = nouveau_i2c_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
new file mode 100644
index 000000000000..1c4c9a5c8e2e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
@@ -0,0 +1,230 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "subdev/i2c.h"
26
27#ifdef CONFIG_NOUVEAU_I2C_INTERNAL
28#define T_TIMEOUT 2200000
29#define T_RISEFALL 1000
30#define T_HOLD 5000
31
/* Local shorthand for the shared SCL drive accessor (base.c). */
static inline void
i2c_drive_scl(struct nouveau_i2c_port *port, int state)
{
	nouveau_i2c_drive_scl(port, state);
}
37
/* Local shorthand for the shared SDA drive accessor (base.c). */
static inline void
i2c_drive_sda(struct nouveau_i2c_port *port, int state)
{
	nouveau_i2c_drive_sda(port, state);
}
43
/* Local shorthand for the shared SCL sense accessor (base.c). */
static inline int
i2c_sense_scl(struct nouveau_i2c_port *port)
{
	return nouveau_i2c_sense_scl(port);
}
49
/* Local shorthand for the shared SDA sense accessor (base.c). */
static inline int
i2c_sense_sda(struct nouveau_i2c_port *port)
{
	return nouveau_i2c_sense_sda(port);
}
55
/* Delay 'nsec' nanoseconds, rounded to the nearest microsecond (udelay
 * granularity).  'port' is unused; kept for a uniform helper signature. */
static void
i2c_delay(struct nouveau_i2c_port *port, u32 nsec)
{
	udelay((nsec + 500) / 1000);
}
61
62static bool
63i2c_raise_scl(struct nouveau_i2c_port *port)
64{
65 u32 timeout = T_TIMEOUT / T_RISEFALL;
66
67 i2c_drive_scl(port, 1);
68 do {
69 i2c_delay(port, T_RISEFALL);
70 } while (!i2c_sense_scl(port) && --timeout);
71
72 return timeout != 0;
73}
74
/* Generate a START condition (SDA falls while SCL is high).  If the bus
 * is not idle, an attempt is made to free it first; returns -EBUSY when
 * that fails, 0 otherwise.
 */
static int
i2c_start(struct nouveau_i2c_port *port)
{
	int ret = 0;

	/* snapshot line state: bit 0 = SCL, bit 1 = SDA */
	port->state = i2c_sense_scl(port);
	port->state |= i2c_sense_sda(port) << 1;
	if (port->state != 3) {
		/* bus not idle (both lines high): try to release it */
		i2c_drive_scl(port, 0);
		i2c_drive_sda(port, 1);
		if (!i2c_raise_scl(port))
			ret = -EBUSY;
	}

	/* START: drop SDA, then SCL, with hold time between */
	i2c_drive_sda(port, 0);
	i2c_delay(port, T_HOLD);
	i2c_drive_scl(port, 0);
	i2c_delay(port, T_HOLD);
	return ret;
}
95
/* Generate a STOP condition: with both lines low, raise SCL and then
 * SDA (SDA rising while SCL is high signals STOP). */
static void
i2c_stop(struct nouveau_i2c_port *port)
{
	i2c_drive_scl(port, 0);
	i2c_drive_sda(port, 0);
	i2c_delay(port, T_RISEFALL);

	i2c_drive_scl(port, 1);
	i2c_delay(port, T_HOLD);
	i2c_drive_sda(port, 1);
	i2c_delay(port, T_HOLD);
}
108
/* Clock one bit out on SDA.  Returns 0, or -ETIMEDOUT if the slave
 * stretches the clock beyond the timeout. */
static int
i2c_bitw(struct nouveau_i2c_port *port, int sda)
{
	i2c_drive_sda(port, sda);
	i2c_delay(port, T_RISEFALL);

	if (!i2c_raise_scl(port))
		return -ETIMEDOUT;
	i2c_delay(port, T_HOLD);

	i2c_drive_scl(port, 0);
	i2c_delay(port, T_HOLD);
	return 0;
}
123
/* Clock one bit in: release SDA, raise SCL, sample SDA.  Returns 0/1,
 * or -ETIMEDOUT if the slave stretches the clock beyond the timeout. */
static int
i2c_bitr(struct nouveau_i2c_port *port)
{
	int sda;

	i2c_drive_sda(port, 1);
	i2c_delay(port, T_RISEFALL);

	if (!i2c_raise_scl(port))
		return -ETIMEDOUT;
	i2c_delay(port, T_HOLD);

	sda = i2c_sense_sda(port);

	i2c_drive_scl(port, 0);
	i2c_delay(port, T_HOLD);
	return sda;
}
142
143static int
144i2c_get_byte(struct nouveau_i2c_port *port, u8 *byte, bool last)
145{
146 int i, bit;
147
148 *byte = 0;
149 for (i = 7; i >= 0; i--) {
150 bit = i2c_bitr(port);
151 if (bit < 0)
152 return bit;
153 *byte |= bit << i;
154 }
155
156 return i2c_bitw(port, last ? 1 : 0);
157}
158
159static int
160i2c_put_byte(struct nouveau_i2c_port *port, u8 byte)
161{
162 int i, ret;
163 for (i = 7; i >= 0; i--) {
164 ret = i2c_bitw(port, !!(byte & (1 << i)));
165 if (ret < 0)
166 return ret;
167 }
168
169 ret = i2c_bitr(port);
170 if (ret == 1) /* nack */
171 ret = -EIO;
172 return ret;
173}
174
175static int
176i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg)
177{
178 u32 addr = msg->addr << 1;
179 if (msg->flags & I2C_M_RD)
180 addr |= 1;
181 return i2c_put_byte(port, addr);
182}
183
/* i2c_algorithm master_xfer for the internal bit-bang implementation:
 * (repeated) START + address for each message, then the data bytes.
 * A STOP is always attempted, even after an error.  Returns 'num' on
 * success or the first negative error encountered.
 */
static int
i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	/* the adapter is embedded at the start of the port struct */
	struct nouveau_i2c_port *port = (struct nouveau_i2c_port *)adap;
	struct i2c_msg *msg = msgs;
	int ret = 0, mcnt = num;

	while (!ret && mcnt--) {
		u8 remaining = msg->len;
		u8 *ptr = msg->buf;

		ret = i2c_start(port);
		if (ret == 0)
			ret = i2c_addr(port, msg);

		if (msg->flags & I2C_M_RD) {
			/* nack the final byte to terminate the read */
			while (!ret && remaining--)
				ret = i2c_get_byte(port, ptr++, !remaining);
		} else {
			while (!ret && remaining--)
				ret = i2c_put_byte(port, *ptr++);
		}

		msg++;
	}

	i2c_stop(port);
	return (ret < 0) ? ret : num;
}
213#else
/* Stub used when CONFIG_NOUVEAU_I2C_INTERNAL is not enabled. */
static int
i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	return -ENODEV;
}
219#endif
220
/* Capabilities advertised for the bit-bang adapter. */
static u32
i2c_bit_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
226
/* i2c algorithm for the internal software bit-bang implementation. */
const struct i2c_algorithm nouveau_i2c_bit_algo = {
	.master_xfer = i2c_bit_xfer,
	.functionality = i2c_bit_func
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c
new file mode 100644
index 000000000000..4e977ff27e44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/ibus.h>
26
/* Private instance data for the nvc0 ibus subdev.
 * base must be first: code casts from nouveau_subdev pointers. */
struct nvc0_ibus_priv {
	struct nouveau_ibus base;
};
30
31static void
32nvc0_ibus_intr_hub(struct nvc0_ibus_priv *priv, int i)
33{
34 u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
35 u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
36 u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400));
37 nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
38 nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
39}
40
41static void
42nvc0_ibus_intr_rop(struct nvc0_ibus_priv *priv, int i)
43{
44 u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
45 u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
46 u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400));
47 nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
48 nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
49}
50
51static void
52nvc0_ibus_intr_gpc(struct nvc0_ibus_priv *priv, int i)
53{
54 u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
55 u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
56 u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400));
57 nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
58 nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
59}
60
/* Top-level ibus interrupt handler: dispatch each pending per-unit
 * fault bit to the matching hub/rop/gpc handler.  Unit counts are read
 * from hardware so only implemented units are inspected.
 */
static void
nvc0_ibus_intr(struct nouveau_subdev *subdev)
{
	struct nvc0_ibus_priv *priv = (void *)subdev;
	u32 intr0 = nv_rd32(priv, 0x121c58);
	u32 intr1 = nv_rd32(priv, 0x121c5c);
	u32 hubnr = nv_rd32(priv, 0x121c70);
	u32 ropnr = nv_rd32(priv, 0x121c74);
	u32 gpcnr = nv_rd32(priv, 0x121c78);
	u32 i;

	/* intr0 bits 8..15: one per hub */
	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
		u32 stat = 0x00000100 << i;
		if (intr0 & stat) {
			nvc0_ibus_intr_hub(priv, i);
			intr0 &= ~stat;
		}
	}

	/* intr0 bits 16..31: one per rop */
	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
		u32 stat = 0x00010000 << i;
		if (intr0 & stat) {
			nvc0_ibus_intr_rop(priv, i);
			intr0 &= ~stat;
		}
	}

	/* intr1: one bit per gpc */
	for (i = 0; intr1 && i < gpcnr; i++) {
		u32 stat = 0x00000001 << i;
		if (intr1 & stat) {
			nvc0_ibus_intr_gpc(priv, i);
			intr1 &= ~stat;
		}
	}
}
96
/* Constructor for the nvc0 ibus subdev: create the common object and
 * hook up the interrupt handler.
 */
static int
nvc0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nvc0_ibus_priv *priv;
	int ret;

	ret = nouveau_ibus_create(parent, engine, oclass, &priv);
	/* published before the error check, matching the other nouveau
	 * ctors in this series */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->intr = nvc0_ibus_intr;
	return 0;
}
113
/* Object class for the nvc0 ibus subdev; dtor/init/fini use the
 * generic _nouveau_ibus_* handlers. */
struct nouveau_oclass
nvc0_ibus_oclass = {
	.handle = NV_SUBDEV(IBUS, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_ibus_ctor,
		.dtor = _nouveau_ibus_dtor,
		.init = _nouveau_ibus_init,
		.fini = _nouveau_ibus_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
new file mode 100644
index 000000000000..7120124dceac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/ibus.h>
26
/* Private state for the nve0 (Kepler) IBUS subdev; currently just the
 * common base. */
27struct nve0_ibus_priv {
28	struct nouveau_ibus base;
29};
30
/* Log a HUB IBUS fault for unit 'i' (address/data/status triple) and
 * acknowledge it by clearing bit 9 of the per-unit status register.
 * Register stride is 0x800 per unit (0x400 on nvc0). */
31static void
32nve0_ibus_intr_hub(struct nve0_ibus_priv *priv, int i)
33{
34	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800));
35	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800));
36	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0800));
37	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
38	nv_mask(priv, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
39}
40
/* Same as nve0_ibus_intr_hub(), for a ROP unit (0x124xxx range). */
41static void
42nve0_ibus_intr_rop(struct nve0_ibus_priv *priv, int i)
43{
44	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800));
45	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800));
46	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0800));
47	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
48	nv_mask(priv, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
49}
50
/* Same as nve0_ibus_intr_hub(), for a GPC unit (0x128xxx range). */
51static void
52nve0_ibus_intr_gpc(struct nve0_ibus_priv *priv, int i)
53{
54	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800));
55	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800));
56	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0800));
57	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
58	nv_mask(priv, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
59}
60
/* Top-level IBUS interrupt handler: identical dispatch logic to the
 * nvc0 version, but the status/count registers live at 0x1200xx on
 * Kepler rather than 0x121cxx. */
61static void
62nve0_ibus_intr(struct nouveau_subdev *subdev)
63{
64	struct nve0_ibus_priv *priv = (void *)subdev;
65	u32 intr0 = nv_rd32(priv, 0x120058);
66	u32 intr1 = nv_rd32(priv, 0x12005c);
67	u32 hubnr = nv_rd32(priv, 0x120070);
68	u32 ropnr = nv_rd32(priv, 0x120074);
69	u32 gpcnr = nv_rd32(priv, 0x120078);
70	u32 i;
71
	/* HUB interrupts: bits 8..15 of intr0 */
72	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
73		u32 stat = 0x00000100 << i;
74		if (intr0 & stat) {
75			nve0_ibus_intr_hub(priv, i);
76			intr0 &= ~stat;
77		}
78	}
79
	/* ROP interrupts: bits 16..31 of intr0 */
80	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
81		u32 stat = 0x00010000 << i;
82		if (intr0 & stat) {
83			nve0_ibus_intr_rop(priv, i);
84			intr0 &= ~stat;
85		}
86	}
87
	/* GPC interrupts: one bit per GPC in intr1 */
88	for (i = 0; intr1 && i < gpcnr; i++) {
89		u32 stat = 0x00000001 << i;
90		if (intr1 & stat) {
91			nve0_ibus_intr_gpc(priv, i);
92			intr1 &= ~stat;
93		}
94	}
95}
96
/* Constructor: create the common ibus subdev and install the Kepler
 * interrupt handler.  *pobject is set before the error check so partial
 * construction can be unwound by the caller. */
97static int
98nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
99	       struct nouveau_oclass *oclass, void *data, u32 size,
100	       struct nouveau_object **pobject)
101{
102	struct nve0_ibus_priv *priv;
103	int ret;
104
105	ret = nouveau_ibus_create(parent, engine, oclass, &priv);
106	*pobject = nv_object(priv);
107	if (ret)
108		return ret;
109
110	nv_subdev(priv)->intr = nve0_ibus_intr;
111	return 0;
112}
113
/* Object class for the nve0 (Kepler) IBUS subdev. */
114struct nouveau_oclass
115nve0_ibus_oclass = {
116	.handle = NV_SUBDEV(IBUS, 0xe0),
117	.ofuncs = &(struct nouveau_ofuncs) {
118		.ctor = nve0_ibus_ctor,
119		.dtor = _nouveau_ibus_dtor,
120		.init = _nouveau_ibus_init,
121		.fini = _nouveau_ibus_fini,
122	},
123};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
new file mode 100644
index 000000000000..1188227ca6aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/instmem.h>
26
/* Create an instance-memory object and link it onto the owning
 * instmem's tracking list (used for save/restore across suspend).
 * Note: 'engine' is the instmem subdev itself here. */
27int
28nouveau_instobj_create_(struct nouveau_object *parent,
29			struct nouveau_object *engine,
30			struct nouveau_oclass *oclass,
31			int length, void **pobject)
32{
33	struct nouveau_instmem *imem = (void *)engine;
34	struct nouveau_instobj *iobj;
35	int ret;
36
37	ret = nouveau_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
38				     length, pobject);
39	iobj = *pobject;
40	if (ret)
41		return ret;
42
43	list_add(&iobj->head, &imem->list);
44	return 0;
45}
46
/* Destroy an instobj: unlink it from the instmem list (only if it was
 * ever linked — head.prev is NULL otherwise) and tear down the base
 * object. */
47void
48nouveau_instobj_destroy(struct nouveau_instobj *iobj)
49{
50	if (iobj->head.prev)
51		list_del(&iobj->head);
52	return nouveau_object_destroy(&iobj->base);
53}
54
/* Default dtor hook: thin wrapper around nouveau_instobj_destroy(). */
55void
56_nouveau_instobj_dtor(struct nouveau_object *object)
57{
58	struct nouveau_instobj *iobj = (void *)object;
59	return nouveau_instobj_destroy(iobj);
60}
61
/* Create the instmem subdev itself and initialise its (initially
 * empty) list of tracked instance objects. */
62int
63nouveau_instmem_create_(struct nouveau_object *parent,
64			struct nouveau_object *engine,
65			struct nouveau_oclass *oclass,
66			int length, void **pobject)
67{
68	struct nouveau_instmem *imem;
69	int ret;
70
71	ret = nouveau_subdev_create_(parent, engine, oclass, 0,
72				     "INSTMEM", "instmem", length, pobject);
73	imem = *pobject;
74	if (ret)
75		return ret;
76
77	INIT_LIST_HEAD(&imem->list);
78	return 0;
79}
80
/* Init (and resume): after the generic subdev init, replay any
 * suspend-time snapshots back into instance memory, 32 bits at a time,
 * then free the vmalloc'd shadow buffers. */
81int
82nouveau_instmem_init(struct nouveau_instmem *imem)
83{
84	struct nouveau_instobj *iobj;
85	int ret, i;
86
87	ret = nouveau_subdev_init(&imem->base);
88	if (ret)
89		return ret;
90
91	list_for_each_entry(iobj, &imem->list, head) {
92		if (iobj->suspend) {
93			for (i = 0; i < iobj->size; i += 4)
94				nv_wo32(iobj, i, iobj->suspend[i / 4]);
95			vfree(iobj->suspend);
96			iobj->suspend = NULL;
97		}
98	}
99
100	return 0;
101}
102
/* Fini: on suspend, snapshot every tracked instobj into a vmalloc'd
 * buffer so its contents can be restored by nouveau_instmem_init().
 * Returns -ENOMEM (leaving earlier snapshots allocated) if any
 * allocation fails. */
103int
104nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
105{
106	struct nouveau_instobj *iobj;
107	int i;
108
109	if (suspend) {
110		list_for_each_entry(iobj, &imem->list, head) {
111			iobj->suspend = vmalloc(iobj->size);
112			if (iobj->suspend) {
113				for (i = 0; i < iobj->size; i += 4)
114					iobj->suspend[i / 4] = nv_ro32(iobj, i);
115			} else
116				return -ENOMEM;
117		}
118	}
119
120	return nouveau_subdev_fini(&imem->base, suspend);
121}
122
/* Default init hook wrapper. */
123int
124_nouveau_instmem_init(struct nouveau_object *object)
125{
126	struct nouveau_instmem *imem = (void *)object;
127	return nouveau_instmem_init(imem);
128}
129
/* Default fini hook wrapper. */
130int
131_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
132{
133	struct nouveau_instmem *imem = (void *)object;
134	return nouveau_instmem_fini(imem, suspend);
135}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
new file mode 100644
index 000000000000..ba4d28b50368
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/fb.h>
26
27#include "nv04.h"
28
/* instobj ctor: 'data' smuggles the requested alignment as an integer
 * cast to a pointer (see nv04_instmem_alloc()); a zero alignment is
 * treated as 1.  The backing space comes from the instmem heap. */
29static int
30nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
31		  struct nouveau_oclass *oclass, void *data, u32 size,
32		  struct nouveau_object **pobject)
33{
34	struct nv04_instmem_priv *priv = (void *)engine;
35	struct nv04_instobj_priv *node;
36	int ret, align;
37
38	align = (unsigned long)data;
39	if (!align)
40		align = 1;
41
42	ret = nouveau_instobj_create(parent, engine, oclass, &node);
43	*pobject = nv_object(node);
44	if (ret)
45		return ret;
46
47	ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
48	if (ret)
49		return ret;
50
51	node->base.addr = node->mem->offset;
52	node->base.size = node->mem->length;
53	return 0;
54}
55
/* instobj dtor: give the heap node back, then destroy the base. */
56static void
57nv04_instobj_dtor(struct nouveau_object *object)
58{
59	struct nv04_instmem_priv *priv = (void *)object->engine;
60	struct nv04_instobj_priv *node = (void *)object;
61	nouveau_mm_free(&priv->heap, &node->mem);
62	nouveau_instobj_destroy(&node->base);
63}
64
/* 32-bit read at 'addr' within the object, delegated to the instmem
 * engine's accessor with the object's heap offset applied. */
65static u32
66nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
67{
68	struct nv04_instobj_priv *node = (void *)object;
69	return nv_ro32(object->engine, node->mem->offset + addr);
70}
71
/* 32-bit write counterpart of nv04_instobj_rd32(). */
72static void
73nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
74{
75	struct nv04_instobj_priv *node = (void *)object;
76	nv_wo32(object->engine, node->mem->offset + addr, data);
77}
78
/* Object class for nv04 instance-memory objects. */
79static struct nouveau_oclass
80nv04_instobj_oclass = {
81	.ofuncs = &(struct nouveau_ofuncs) {
82		.ctor = nv04_instobj_ctor,
83		.dtor = nv04_instobj_dtor,
84		.init = _nouveau_instobj_init,
85		.fini = _nouveau_instobj_fini,
86		.rd32 = nv04_instobj_rd32,
87		.wr32 = nv04_instobj_wr32,
88	},
89};
90
/* instmem 'alloc' hook (shared with nv40): construct an instobj,
 * passing 'align' through the ctor's data pointer. */
91int
92nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
93		   u32 size, u32 align, struct nouveau_object **pobject)
94{
95	struct nouveau_object *engine = nv_object(imem);
96	struct nv04_instmem_priv *priv = (void *)(imem);
97	int ret;
98
99	ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
100				  (void *)(unsigned long)align, size, pobject);
101	if (ret)
102		return ret;
103
104	/* INSTMEM itself creates objects to reserve (and preserve across
105	 * suspend/resume) various fixed data locations, each one of these
106	 * takes a reference on INSTMEM itself, causing it to never be
107	 * freed.  We drop all the self-references here to avoid this.
108	 */
109	if (unlikely(!priv->created))
110		atomic_dec(&engine->refcount);
111
112	return 0;
113}
114
/* Constructor: sets up a 512KiB PRAMIN heap and carves out the fixed
 * reservations (VBIOS shadow, RAMHT, RAMFC, RAMRO) at the documented
 * offsets.  'created' is set last so the self-reference workaround in
 * nv04_instmem_alloc() applies only to these internal allocations. */
115static int
116nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
117		  struct nouveau_oclass *oclass, void *data, u32 size,
118		  struct nouveau_object **pobject)
119{
120	struct nv04_instmem_priv *priv;
121	int ret;
122
123	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
124	*pobject = nv_object(priv);
125	if (ret)
126		return ret;
127
128	/* PRAMIN aperture maps over the end of VRAM, reserve it */
129	priv->base.reserved = 512 * 1024;
130	priv->base.alloc = nv04_instmem_alloc;
131
132	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
133	if (ret)
134		return ret;
135
136	/* 0x00000-0x10000: reserve for probable vbios image */
137	ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
138	if (ret)
139		return ret;
140
141	/* 0x10000-0x18000: reserve for RAMHT */
142	ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
143	if (ret)
144		return ret;
145
146	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
147	ret = nouveau_gpuobj_new(parent, NULL, 0x00800, 0,
148				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
149	if (ret)
150		return ret;
151
152	/* 0x18800-0x18a00: reserve for RAMRO */
153	ret = nouveau_gpuobj_new(parent, NULL, 0x00200, 0, 0, &priv->ramro);
154	if (ret)
155		return ret;
156
157	priv->created = true;
158	return 0;
159}
160
/* Destructor (also used by nv40): drop the fixed reservations, tear
 * down the heap, and unmap the PRAMIN BAR if one was mapped (only the
 * nv40 path sets priv->iomem). */
161void
162nv04_instmem_dtor(struct nouveau_object *object)
163{
164	struct nv04_instmem_priv *priv = (void *)object;
165	nouveau_gpuobj_ref(NULL, &priv->ramfc);
166	nouveau_gpuobj_ref(NULL, &priv->ramro);
167	nouveau_ramht_ref(NULL, &priv->ramht);
168	nouveau_gpuobj_ref(NULL, &priv->vbios);
169	nouveau_mm_fini(&priv->heap);
170	if (priv->iomem)
171		iounmap(priv->iomem);
172	nouveau_instmem_destroy(&priv->base);
173}
174
/* PRAMIN access on nv04: aperture is at MMIO offset 0x700000. */
175static u32
176nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
177{
178	return nv_rd32(object, 0x700000 + addr);
179}
180
181static void
182nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
183{
184	return nv_wr32(object, 0x700000 + addr, data);
185}
186
/* Object class for the nv04 INSTMEM subdev. */
187struct nouveau_oclass
188nv04_instmem_oclass = {
189	.handle = NV_SUBDEV(INSTMEM, 0x04),
190	.ofuncs = &(struct nouveau_ofuncs) {
191		.ctor = nv04_instmem_ctor,
192		.dtor = nv04_instmem_dtor,
193		.init = _nouveau_instmem_init,
194		.fini = _nouveau_instmem_fini,
195		.rd32 = nv04_instmem_rd32,
196		.wr32 = nv04_instmem_wr32,
197	},
198};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
new file mode 100644
index 000000000000..7983d8d9b358
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -0,0 +1,39 @@
1#ifndef __NV04_INSTMEM_H__
2#define __NV04_INSTMEM_H__
3
4#include <core/gpuobj.h>
5#include <core/ramht.h>
6#include <core/mm.h>
7
8#include <subdev/instmem.h>
9
/* Private state shared by the nv04 and nv40 INSTMEM implementations.
 * 'created' gates the self-reference workaround in nv04_instmem_alloc();
 * 'iomem' is the ioremap'd PRAMIN BAR (nv40 only, NULL on nv04). */
10struct nv04_instmem_priv {
11	struct nouveau_instmem base;
12	bool created;
13
14	void __iomem *iomem;
15	struct nouveau_mm heap;
16
	/* fixed reservations carved out at ctor time */
17	struct nouveau_gpuobj *vbios;
18	struct nouveau_ramht *ramht;
19	struct nouveau_gpuobj *ramro;
20	struct nouveau_gpuobj *ramfc;
21};
22
/* Upcast helper: any nouveau object -> its nv04 instmem subdev. */
23static inline struct nv04_instmem_priv *
24nv04_instmem(void *obj)
25{
26	return (void *)nouveau_instmem(obj);
27}
28
/* Per-allocation state: base instobj plus the backing heap node. */
29struct nv04_instobj_priv {
30	struct nouveau_instobj base;
31	struct nouveau_mm_node *mem;
32};
33
34void nv04_instmem_dtor(struct nouveau_object *);
35
36int nv04_instmem_alloc(struct nouveau_instmem *, struct nouveau_object *,
37		       u32 size, u32 align, struct nouveau_object **pobject);
38
39#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
new file mode 100644
index 000000000000..73c52ebd5932
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -0,0 +1,138 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv04.h"
26
/* True when the chipset uses the nv44 graphics class; magic mask taken
 * from the graph engine code (see engine/graph comment below). */
27static inline int
28nv44_graph_class(struct nv04_instmem_priv *priv)
29{
30	if ((nv_device(priv)->chipset & 0xf0) == 0x60)
31		return 1;
32	return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f)));
33}
34
/* Constructor: maps the PRAMIN BAR (BAR2, falling back to BAR3), sizes
 * the reserved region from per-chipset graphics context sizes, then
 * carves out the same fixed reservations as nv04 at nv40 offsets. */
35static int
36nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
37		  struct nouveau_oclass *oclass, void *data, u32 size,
38		  struct nouveau_object **pobject)
39{
40	struct nouveau_device *device = nv_device(parent);
41	struct pci_dev *pdev = device->pdev;
42	struct nv04_instmem_priv *priv;
43	int ret, bar, vs;
44
45	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
46	*pobject = nv_object(priv);
47	if (ret)
48		return ret;
49
50	/* map bar */
51	if (pci_resource_len(pdev, 2))
52		bar = 2;
53	else
54		bar = 3;
55
56	priv->iomem = ioremap(pci_resource_start(pdev, bar),
57			      pci_resource_len(pdev, bar));
58	if (!priv->iomem) {
59		nv_error(priv, "unable to map PRAMIN BAR\n");
60		return -EFAULT;
61	}
62
63	/* PRAMIN aperture maps over the end of vram, reserve enough space
64	 * to fit graphics contexts for every channel, the magics come
65	 * from engine/graph/nv40.c
66	 */
	/* vs = number of active vertex shader units, scales ctx size */
67	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
68	if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
69	else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs;
70	else if (nv44_graph_class(priv))  priv->base.reserved = 0x4980 * vs;
71	else				  priv->base.reserved = 0x4a40 * vs;
72	priv->base.reserved += 16 * 1024;
73	priv->base.reserved *= 32;		/* per-channel */
74	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
75	priv->base.reserved += 512 * 1024;	/* object storage */
76
77	priv->base.reserved = round_up(priv->base.reserved, 4096);
78	priv->base.alloc = nv04_instmem_alloc;
79
80	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
81	if (ret)
82		return ret;
83
84	/* 0x00000-0x10000: reserve for probable vbios image */
85	ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
86	if (ret)
87		return ret;
88
89	/* 0x10000-0x18000: reserve for RAMHT */
90	ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
91	if (ret)
92		return ret;
93
94	/* 0x18000-0x18200: reserve for RAMRO
95	 * 0x18200-0x20000: padding
96	 */
97	ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 0, &priv->ramro);
98	if (ret)
99		return ret;
100
101	/* 0x20000-0x21000: reserve for RAMFC
102	 * 0x21000-0x40000: padding and some unknown crap
103	 */
104	ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0,
105				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
106	if (ret)
107		return ret;
108
109	priv->created = true;
110	return 0;
111}
112
/* PRAMIN access via the ioremap'd BAR instead of the MMIO window. */
113static u32
114nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
115{
116	struct nv04_instmem_priv *priv = (void *)object;
117	return ioread32_native(priv->iomem + addr);
118}
119
120static void
121nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
122{
123	struct nv04_instmem_priv *priv = (void *)object;
124	iowrite32_native(data, priv->iomem + addr);
125}
126
/* Object class for the nv40 INSTMEM subdev; reuses the nv04 dtor. */
127struct nouveau_oclass
128nv40_instmem_oclass = {
129	.handle = NV_SUBDEV(INSTMEM, 0x40),
130	.ofuncs = &(struct nouveau_ofuncs) {
131		.ctor = nv40_instmem_ctor,
132		.dtor = nv04_instmem_dtor,
133		.init = _nouveau_instmem_init,
134		.fini = _nouveau_instmem_fini,
135		.rd32 = nv40_instmem_rd32,
136		.wr32 = nv40_instmem_wr32,
137	},
138};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
new file mode 100644
index 000000000000..27ef0891d10b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/instmem.h>
26#include <subdev/fb.h>
27
28#include <core/mm.h>
29
/* nv50 instmem: instance memory lives in VRAM and is accessed through
 * a sliding 1MiB window at 0x700000; 'addr' caches the currently
 * selected window base, 'lock' serialises window moves + accesses. */
30struct nv50_instmem_priv {
31	struct nouveau_instmem base;
32	spinlock_t lock;
33	u64 addr;
34};
35
/* Per-allocation state: base instobj plus its VRAM backing. */
36struct nv50_instobj_priv {
37	struct nouveau_instobj base;
38	struct nouveau_mem *mem;
39};
40
/* instobj ctor: rounds size/align up to 4KiB pages and allocates
 * backing VRAM from the fb's ram allocator.  'data' carries the
 * requested alignment (see nv50_instmem_alloc()). */
41static int
42nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
43		  struct nouveau_oclass *oclass, void *data, u32 size,
44		  struct nouveau_object **pobject)
45{
46	struct nouveau_fb *pfb = nouveau_fb(parent);
47	struct nv50_instobj_priv *node;
48	u32 align = (unsigned long)data;
49	int ret;
50
51	size  = max((size  + 4095) & ~4095, (u32)4096);
52	align = max((align + 4095) & ~4095, (u32)4096);
53
54	ret = nouveau_instobj_create(parent, engine, oclass, &node);
55	*pobject = nv_object(node);
56	if (ret)
57		return ret;
58
59	ret = pfb->ram.get(pfb, size, align, 0, 0x800, &node->mem);
60	if (ret)
61		return ret;
62
63	node->base.addr = node->mem->offset;
64	node->base.size = node->mem->size << 12;
65	node->mem->page_shift = 12;
66	return 0;
67}
68
/* instobj dtor: return the VRAM and destroy the base object. */
69static void
70nv50_instobj_dtor(struct nouveau_object *object)
71{
72	struct nv50_instobj_priv *node = (void *)object;
73	struct nouveau_fb *pfb = nouveau_fb(object);
74	pfb->ram.put(pfb, &node->mem);
75	nouveau_instobj_destroy(&node->base);
76}
77
/* 32-bit read: repoint the 1MiB PRAMIN window (0x001700) at the page
 * containing the target address if needed, then read through 0x700000.
 * Window state is cached in priv->addr and protected by priv->lock. */
78static u32
79nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
80{
81	struct nv50_instmem_priv *priv = (void *)object->engine;
82	struct nv50_instobj_priv *node = (void *)object;
83	unsigned long flags;
84	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
85	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
86	u32 data;
87
88	spin_lock_irqsave(&priv->lock, flags);
89	if (unlikely(priv->addr != base)) {
90		nv_wr32(priv, 0x001700, base >> 16);
91		priv->addr = base;
92	}
93	data = nv_rd32(priv, 0x700000 + addr);
94	spin_unlock_irqrestore(&priv->lock, flags);
95	return data;
96}
97
/* 32-bit write counterpart of nv50_instobj_rd32(). */
98static void
99nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
100{
101	struct nv50_instmem_priv *priv = (void *)object->engine;
102	struct nv50_instobj_priv *node = (void *)object;
103	unsigned long flags;
104	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
105	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
106
107	spin_lock_irqsave(&priv->lock, flags);
108	if (unlikely(priv->addr != base)) {
109		nv_wr32(priv, 0x001700, base >> 16);
110		priv->addr = base;
111	}
112	nv_wr32(priv, 0x700000 + addr, data);
113	spin_unlock_irqrestore(&priv->lock, flags);
114}
115
/* Object class for nv50 instance-memory objects. */
116static struct nouveau_oclass
117nv50_instobj_oclass = {
118	.ofuncs = &(struct nouveau_ofuncs) {
119		.ctor = nv50_instobj_ctor,
120		.dtor = nv50_instobj_dtor,
121		.init = _nouveau_instobj_init,
122		.fini = _nouveau_instobj_fini,
123		.rd32 = nv50_instobj_rd32,
124		.wr32 = nv50_instobj_wr32,
125	},
126};
127
/* instmem 'alloc' hook: construct an instobj, passing 'align' through
 * the ctor's data pointer. */
128static int
129nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
130		   u32 size, u32 align, struct nouveau_object **pobject)
131{
132	struct nouveau_object *engine = nv_object(imem);
133	return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass,
134				   (void *)(unsigned long)align, size, pobject);
135}
136
/* Constructor: no fixed reservations on nv50, just the window lock
 * and the alloc hook. */
137static int
138nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
139		  struct nouveau_oclass *oclass, void *data, u32 size,
140		  struct nouveau_object **pobject)
141{
142	struct nv50_instmem_priv *priv;
143	int ret;
144
145	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
146	*pobject = nv_object(priv);
147	if (ret)
148		return ret;
149
150	spin_lock_init(&priv->lock);
151	priv->base.alloc = nv50_instmem_alloc;
152	return 0;
153}
154
/* Fini: invalidate the cached window base so the first access after
 * resume reprograms 0x001700, then run the common fini. */
155static int
156nv50_instmem_fini(struct nouveau_object *object, bool suspend)
157{
158	struct nv50_instmem_priv *priv = (void *)object;
159	priv->addr = ~0ULL;
160	return nouveau_instmem_fini(&priv->base, suspend);
161}
162
/* Object class for the nv50 INSTMEM subdev. */
163struct nouveau_oclass
164nv50_instmem_oclass = {
165	.handle = NV_SUBDEV(INSTMEM, 0x50),
166	.ofuncs = &(struct nouveau_ofuncs) {
167		.ctor = nv50_instmem_ctor,
168		.dtor = _nouveau_instmem_dtor,
169		.init = _nouveau_instmem_init,
170		.fini = nv50_instmem_fini,
171	},
172};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
new file mode 100644
index 000000000000..078a2b9d6bd6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/ltcg.h>
26
/* Private state for the nvc0 LTCG (L2 cache) subdev; subp_nr is the
 * number of sub-partitions per LTC unit, read at ctor time. */
27struct nvc0_ltcg_priv {
28	struct nouveau_ltcg base;
29	u32 subp_nr;
30};
31
/* Report and acknowledge any pending status for one LTC sub-partition.
 * Per-unit stride is 0x2000, per-subp stride is 0x400. */
32static void
33nvc0_ltcg_subp_isr(struct nvc0_ltcg_priv *priv, int unit, int subp)
34{
35	u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
36	u32 stat = nv_rd32(priv, subp_base + 0x020);
37
38	if (stat) {
39		nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", unit, subp, stat);
40		nv_wr32(priv, subp_base + 0x020, stat);
41	}
42}
43
/* Top-level LTCG interrupt handler: walk the per-unit pending mask at
 * 0x00017c and service every sub-partition of each flagged unit. */
44static void
45nvc0_ltcg_intr(struct nouveau_subdev *subdev)
46{
47	struct nvc0_ltcg_priv *priv = (void *)subdev;
48	u32 units;
49
50	units = nv_rd32(priv, 0x00017c);
51	while (units) {
52		u32 subp, unit = ffs(units) - 1;
53		for (subp = 0; subp < priv->subp_nr; subp++)
54			nvc0_ltcg_subp_isr(priv, unit, subp);
55		units &= ~(1 << unit);
56	}
57
58	/* we do something horribly wrong and upset PMFB a lot, so mask off
59	 * interrupts from it after the first one until it's fixed
60	 */
61	nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
62}
63
/* Constructor: read the sub-partition count, disable one noisy
 * interrupt source, and install the handler. */
64static int
65nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
66	       struct nouveau_oclass *oclass, void *data, u32 size,
67	       struct nouveau_object **pobject)
68{
69	struct nvc0_ltcg_priv *priv;
70	int ret;
71
72	ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
73	*pobject = nv_object(priv);
74	if (ret)
75		return ret;
76
77	priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 24;
78	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
79
80	nv_subdev(priv)->intr = nvc0_ltcg_intr;
81	return 0;
82}
83
/* Object class for the nvc0 LTCG subdev. */
84struct nouveau_oclass
85nvc0_ltcg_oclass = {
86	.handle = NV_SUBDEV(LTCG, 0xc0),
87	.ofuncs = &(struct nouveau_ofuncs) {
88		.ctor = nvc0_ltcg_ctor,
89		.dtor = _nouveau_ltcg_dtor,
90		.init = _nouveau_ltcg_init,
91		.fini = _nouveau_ltcg_fini,
92	},
93};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
new file mode 100644
index 000000000000..de5721cfc4c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -0,0 +1,49 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
/* Generic PMC interrupt dispatcher: read the pending mask (0x000100)
 * and, for each entry of the per-chipset intr_map, forward the
 * interrupt to that unit's subdev handler if one is registered.
 * Bits matched by the map are cleared from the local copy; anything
 * left over is logged as unknown. */
27void
28nouveau_mc_intr(struct nouveau_subdev *subdev)
29{
30	struct nouveau_mc *pmc = nouveau_mc(subdev);
31	const struct nouveau_mc_intr *map = pmc->intr_map;
32	struct nouveau_subdev *unit;
33	u32 stat;
34
35	stat = nv_rd32(pmc, 0x000100);
	/* map is terminated by an entry with stat == 0 */
36	while (stat && map->stat) {
37		if (stat & map->stat) {
38			unit = nouveau_subdev(subdev, map->unit);
39			if (unit && unit->intr)
40				unit->intr(unit);
41			stat &= ~map->stat;
42		}
43		map++;
44	}
45
46	if (stat) {
47		nv_error(pmc, "unknown intr 0x%08x\n", stat);
48	}
49}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
new file mode 100644
index 000000000000..23ebe477a6f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27struct nv04_mc_priv {
28 struct nouveau_mc base;
29};
30
31const struct nouveau_mc_intr
32nv04_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */
34 { 0x00000100, NVDEV_ENGINE_FIFO },
35 { 0x00001000, NVDEV_ENGINE_GR },
36 { 0x00020000, NVDEV_ENGINE_VP }, /* NV40- */
37 { 0x00100000, NVDEV_SUBDEV_TIMER },
38 { 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */
39 { 0x02000000, NVDEV_ENGINE_DISP }, /* NV11- PCRTC1 */
40 { 0x10000000, NVDEV_SUBDEV_GPIO }, /* PBUS */
41 { 0x80000000, NVDEV_ENGINE_SW },
42 {}
43};
44
45static int
46nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv04_mc_priv *priv;
51 int ret;
52
53 ret = nouveau_mc_create(parent, engine, oclass, &priv);
54 *pobject = nv_object(priv);
55 if (ret)
56 return ret;
57
58 nv_subdev(priv)->intr = nouveau_mc_intr;
59 priv->base.intr_map = nv04_mc_intr;
60 return 0;
61}
62
63int
64nv04_mc_init(struct nouveau_object *object)
65{
66 struct nv04_mc_priv *priv = (void *)object;
67
68 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
69 nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
70
71 return nouveau_mc_init(&priv->base);
72}
73
74struct nouveau_oclass
75nv04_mc_oclass = {
76 .handle = NV_SUBDEV(MC, 0x04),
77 .ofuncs = &(struct nouveau_ofuncs) {
78 .ctor = nv04_mc_ctor,
79 .dtor = _nouveau_mc_dtor,
80 .init = nv04_mc_init,
81 .fini = _nouveau_mc_fini,
82 },
83};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
new file mode 100644
index 000000000000..397d868359ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27struct nv44_mc_priv {
28 struct nouveau_mc base;
29};
30
31static int
32nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
33 struct nouveau_oclass *oclass, void *data, u32 size,
34 struct nouveau_object **pobject)
35{
36 struct nv44_mc_priv *priv;
37 int ret;
38
39 ret = nouveau_mc_create(parent, engine, oclass, &priv);
40 *pobject = nv_object(priv);
41 if (ret)
42 return ret;
43
44 nv_subdev(priv)->intr = nouveau_mc_intr;
45 priv->base.intr_map = nv04_mc_intr;
46 return 0;
47}
48
49static int
50nv44_mc_init(struct nouveau_object *object)
51{
52 struct nv44_mc_priv *priv = (void *)object;
53 u32 tmp = nv_rd32(priv, 0x10020c);
54
55 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
56
57 nv_wr32(priv, 0x001700, tmp);
58 nv_wr32(priv, 0x001704, 0);
59 nv_wr32(priv, 0x001708, 0);
60 nv_wr32(priv, 0x00170c, tmp);
61
62 return nouveau_mc_init(&priv->base);
63}
64
65struct nouveau_oclass
66nv44_mc_oclass = {
67 .handle = NV_SUBDEV(MC, 0x44),
68 .ofuncs = &(struct nouveau_ofuncs) {
69 .ctor = nv44_mc_ctor,
70 .dtor = _nouveau_mc_dtor,
71 .init = nv44_mc_init,
72 .fini = _nouveau_mc_fini,
73 },
74};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
new file mode 100644
index 000000000000..cedf33b02977
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -0,0 +1,80 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27struct nv50_mc_priv {
28 struct nouveau_mc base;
29};
30
31static const struct nouveau_mc_intr
32nv50_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_MPEG },
34 { 0x00000100, NVDEV_ENGINE_FIFO },
35 { 0x00001000, NVDEV_ENGINE_GR },
36 { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84- */
37 { 0x00008000, NVDEV_ENGINE_BSP }, /* NV84- */
38 { 0x00100000, NVDEV_SUBDEV_TIMER },
39 { 0x00200000, NVDEV_SUBDEV_GPIO },
40 { 0x04000000, NVDEV_ENGINE_DISP },
41 { 0x80000000, NVDEV_ENGINE_SW },
42 {},
43};
44
45static int
46nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv50_mc_priv *priv;
51 int ret;
52
53 ret = nouveau_mc_create(parent, engine, oclass, &priv);
54 *pobject = nv_object(priv);
55 if (ret)
56 return ret;
57
58 nv_subdev(priv)->intr = nouveau_mc_intr;
59 priv->base.intr_map = nv50_mc_intr;
60 return 0;
61}
62
63int
64nv50_mc_init(struct nouveau_object *object)
65{
66 struct nv50_mc_priv *priv = (void *)object;
67 nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
68 return nouveau_mc_init(&priv->base);
69}
70
71struct nouveau_oclass
72nv50_mc_oclass = {
73 .handle = NV_SUBDEV(MC, 0x50),
74 .ofuncs = &(struct nouveau_ofuncs) {
75 .ctor = nv50_mc_ctor,
76 .dtor = _nouveau_mc_dtor,
77 .init = nv50_mc_init,
78 .fini = _nouveau_mc_fini,
79 },
80};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
new file mode 100644
index 000000000000..a001e4c4d38d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -0,0 +1,73 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27struct nv98_mc_priv {
28 struct nouveau_mc base;
29};
30
31static const struct nouveau_mc_intr
32nv98_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_PPP },
34 { 0x00000100, NVDEV_ENGINE_FIFO },
35 { 0x00001000, NVDEV_ENGINE_GR },
36 { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
37 { 0x00008000, NVDEV_ENGINE_BSP },
38 { 0x00100000, NVDEV_SUBDEV_TIMER },
39 { 0x00200000, NVDEV_SUBDEV_GPIO },
40 { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
41 { 0x04000000, NVDEV_ENGINE_DISP },
42 { 0x80000000, NVDEV_ENGINE_SW },
43 {},
44};
45
46static int
47nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
48 struct nouveau_oclass *oclass, void *data, u32 size,
49 struct nouveau_object **pobject)
50{
51 struct nv98_mc_priv *priv;
52 int ret;
53
54 ret = nouveau_mc_create(parent, engine, oclass, &priv);
55 *pobject = nv_object(priv);
56 if (ret)
57 return ret;
58
59 nv_subdev(priv)->intr = nouveau_mc_intr;
60 priv->base.intr_map = nv98_mc_intr;
61 return 0;
62}
63
64struct nouveau_oclass
65nv98_mc_oclass = {
66 .handle = NV_SUBDEV(MC, 0x98),
67 .ofuncs = &(struct nouveau_ofuncs) {
68 .ctor = nv98_mc_ctor,
69 .dtor = _nouveau_mc_dtor,
70 .init = nv50_mc_init,
71 .fini = _nouveau_mc_fini,
72 },
73};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
new file mode 100644
index 000000000000..c2b81e30a17d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27struct nvc0_mc_priv {
28 struct nouveau_mc base;
29};
30
31static const struct nouveau_mc_intr
32nvc0_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_PPP },
34 { 0x00000020, NVDEV_ENGINE_COPY0 },
35 { 0x00000040, NVDEV_ENGINE_COPY1 },
36 { 0x00000100, NVDEV_ENGINE_FIFO },
37 { 0x00001000, NVDEV_ENGINE_GR },
38 { 0x00008000, NVDEV_ENGINE_BSP },
39 { 0x00100000, NVDEV_SUBDEV_TIMER },
40 { 0x00200000, NVDEV_SUBDEV_GPIO },
41 { 0x02000000, NVDEV_SUBDEV_LTCG },
42 { 0x04000000, NVDEV_ENGINE_DISP },
43 { 0x40000000, NVDEV_SUBDEV_IBUS },
44 { 0x80000000, NVDEV_ENGINE_SW },
45 {},
46};
47
48static int
49nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{
53 struct nvc0_mc_priv *priv;
54 int ret;
55
56 ret = nouveau_mc_create(parent, engine, oclass, &priv);
57 *pobject = nv_object(priv);
58 if (ret)
59 return ret;
60
61 nv_subdev(priv)->intr = nouveau_mc_intr;
62 priv->base.intr_map = nvc0_mc_intr;
63 return 0;
64}
65
66struct nouveau_oclass
67nvc0_mc_oclass = {
68 .handle = NV_SUBDEV(MC, 0xc0),
69 .ofuncs = &(struct nouveau_ofuncs) {
70 .ctor = nvc0_mc_ctor,
71 .dtor = _nouveau_mc_dtor,
72 .init = nv50_mc_init,
73 .fini = _nouveau_mc_fini,
74 },
75};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
new file mode 100644
index 000000000000..93e3ddf7303a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -0,0 +1,290 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/option.h>
26
27#include <subdev/i2c.h>
28#include <subdev/mxm.h>
29#include <subdev/bios.h>
30#include <subdev/bios/mxm.h>
31
32#include "mxms.h"
33
34static bool
35mxm_shadow_rom_fetch(struct nouveau_i2c_port *i2c, u8 addr,
36 u8 offset, u8 size, u8 *data)
37{
38 struct i2c_msg msgs[] = {
39 { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
40 { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
41 };
42
43 return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
44}
45
46static bool
47mxm_shadow_rom(struct nouveau_mxm *mxm, u8 version)
48{
49 struct nouveau_bios *bios = nouveau_bios(mxm);
50 struct nouveau_i2c *i2c = nouveau_i2c(mxm);
51 struct nouveau_i2c_port *port = NULL;
52 u8 i2cidx, mxms[6], addr, size;
53
54 i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
55 if (i2cidx < 0x0f)
56 port = i2c->find(i2c, i2cidx);
57 if (!port)
58 return false;
59
60 addr = 0x54;
61 if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) {
62 addr = 0x56;
63 if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms))
64 return false;
65 }
66
67 mxm->mxms = mxms;
68 size = mxms_headerlen(mxm) + mxms_structlen(mxm);
69 mxm->mxms = kmalloc(size, GFP_KERNEL);
70
71 if (mxm->mxms &&
72 mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms))
73 return true;
74
75 kfree(mxm->mxms);
76 mxm->mxms = NULL;
77 return false;
78}
79
#if defined(CONFIG_ACPI)
/* Shadow the MXMS via the ACPI _DSM method on the GPU's ACPI handle.
 * A buffer result is the structure itself; an integer result is a
 * status/diagnostic value.
 */
static bool
mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
{
	struct nouveau_device *device = nv_device(mxm);
	/* MXM _DSM interface UUID, in ACPI (mixed-endian) byte layout */
	static char muid[] = {
		0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
		0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
	};
	u32 mxms_args[] = { 0x00000000 };
	union acpi_object args[4] = {
		/* _DSM MUID */
		{ .buffer.type = 3,
		  .buffer.length = sizeof(muid),
		  .buffer.pointer = muid,
		},
		/* spec says this can be zero to mean "highest revision", but
		 * of course there's at least one bios out there which fails
		 * unless you pass in exactly the version it supports..
		 */
		{ .integer.type = ACPI_TYPE_INTEGER,
		  .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
		},
		/* MXMS function */
		{ .integer.type = ACPI_TYPE_INTEGER,
		  .integer.value = 0x00000010,
		},
		/* Pointer to MXMS arguments */
		{ .buffer.type = ACPI_TYPE_BUFFER,
		  .buffer.length = sizeof(mxms_args),
		  .buffer.pointer = (char *)mxms_args,
		},
	};
	struct acpi_object_list list = { ARRAY_SIZE(args), args };
	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_handle handle;
	int ret;

	handle = DEVICE_ACPI_HANDLE(&device->pdev->dev);
	if (!handle)
		return false;

	ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
	if (ret) {
		nv_debug(mxm, "DSM MXMS failed: %d\n", ret);
		return false;
	}

	obj = retn.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		/* keep a private copy; the ACPI buffer is freed below */
		mxm->mxms = kmemdup(obj->buffer.pointer,
				    obj->buffer.length, GFP_KERNEL);
	} else if (obj->type == ACPI_TYPE_INTEGER) {
		nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value);
	}

	kfree(obj);
	return mxm->mxms != NULL;
}
#endif
142
143#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
144
145#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
146
147static u8
148wmi_wmmx_mxmi(struct nouveau_mxm *mxm, u8 version)
149{
150 u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
151 struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
152 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
153 union acpi_object *obj;
154 acpi_status status;
155
156 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
157 if (ACPI_FAILURE(status)) {
158 nv_debug(mxm, "WMMX MXMI returned %d\n", status);
159 return 0x00;
160 }
161
162 obj = retn.pointer;
163 if (obj->type == ACPI_TYPE_INTEGER) {
164 version = obj->integer.value;
165 nv_debug(mxm, "WMMX MXMI version %d.%d\n",
166 (version >> 4), version & 0x0f);
167 } else {
168 version = 0;
169 nv_debug(mxm, "WMMX MXMI returned non-integer\n");
170 }
171
172 kfree(obj);
173 return version;
174}
175
176static bool
177mxm_shadow_wmi(struct nouveau_mxm *mxm, u8 version)
178{
179 u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
180 struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
181 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
182 union acpi_object *obj;
183 acpi_status status;
184
185 if (!wmi_has_guid(WMI_WMMX_GUID)) {
186 nv_debug(mxm, "WMMX GUID not found\n");
187 return false;
188 }
189
190 mxms_args[1] = wmi_wmmx_mxmi(mxm, 0x00);
191 if (!mxms_args[1])
192 mxms_args[1] = wmi_wmmx_mxmi(mxm, version);
193 if (!mxms_args[1])
194 return false;
195
196 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
197 if (ACPI_FAILURE(status)) {
198 nv_debug(mxm, "WMMX MXMS returned %d\n", status);
199 return false;
200 }
201
202 obj = retn.pointer;
203 if (obj->type == ACPI_TYPE_BUFFER) {
204 mxm->mxms = kmemdup(obj->buffer.pointer,
205 obj->buffer.length, GFP_KERNEL);
206 }
207
208 kfree(obj);
209 return mxm->mxms != NULL;
210}
211#endif
212
213static struct mxm_shadow_h {
214 const char *name;
215 bool (*exec)(struct nouveau_mxm *, u8 version);
216} _mxm_shadow[] = {
217 { "ROM", mxm_shadow_rom },
218#if defined(CONFIG_ACPI)
219 { "DSM", mxm_shadow_dsm },
220#endif
221#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
222 { "WMI", mxm_shadow_wmi },
223#endif
224 {}
225};
226
227static int
228mxm_shadow(struct nouveau_mxm *mxm, u8 version)
229{
230 struct mxm_shadow_h *shadow = _mxm_shadow;
231 do {
232 nv_debug(mxm, "checking %s\n", shadow->name);
233 if (shadow->exec(mxm, version)) {
234 if (mxms_valid(mxm))
235 return 0;
236 kfree(mxm->mxms);
237 mxm->mxms = NULL;
238 }
239 } while ((++shadow)->name);
240 return -ENOENT;
241}
242
243int
244nouveau_mxm_create_(struct nouveau_object *parent,
245 struct nouveau_object *engine,
246 struct nouveau_oclass *oclass, int length, void **pobject)
247{
248 struct nouveau_device *device = nv_device(parent);
249 struct nouveau_bios *bios = nouveau_bios(device);
250 struct nouveau_mxm *mxm;
251 u8 ver, len;
252 u16 data;
253 int ret;
254
255 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm",
256 length, pobject);
257 mxm = *pobject;
258 if (ret)
259 return ret;
260
261 data = mxm_table(bios, &ver, &len);
262 if (!data || !(ver = nv_ro08(bios, data))) {
263 nv_info(mxm, "no VBIOS data, nothing to do\n");
264 return 0;
265 }
266
267 nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
268
269 if (mxm_shadow(mxm, ver)) {
270 nv_info(mxm, "failed to locate valid SIS\n");
271#if 0
272 /* we should, perhaps, fall back to some kind of limited
273 * mode here if the x86 vbios hasn't already done the
274 * work for us (so we prevent loading with completely
275 * whacked vbios tables).
276 */
277 return -EINVAL;
278#else
279 return 0;
280#endif
281 }
282
283 nv_info(mxm, "MXMS Version %d.%d\n",
284 mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
285 mxms_foreach(mxm, 0, NULL, NULL);
286
287 if (nouveau_boolopt(device->cfgopt, "NvMXMDCB", true))
288 mxm->action |= MXM_SANITISE_DCB;
289 return 0;
290}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
new file mode 100644
index 000000000000..839ca1edc132
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
@@ -0,0 +1,193 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mxm.h>
26#include "mxms.h"
27
28#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
29#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
30
31static u8 *
32mxms_data(struct nouveau_mxm *mxm)
33{
34 return mxm->mxms;
35
36}
37
38u16
39mxms_version(struct nouveau_mxm *mxm)
40{
41 u8 *mxms = mxms_data(mxm);
42 u16 version = (mxms[4] << 8) | mxms[5];
43 switch (version ) {
44 case 0x0200:
45 case 0x0201:
46 case 0x0300:
47 return version;
48 default:
49 break;
50 }
51
52 nv_debug(mxm, "unknown version %d.%d\n", mxms[4], mxms[5]);
53 return 0x0000;
54}
55
56u16
57mxms_headerlen(struct nouveau_mxm *mxm)
58{
59 return 8;
60}
61
62u16
63mxms_structlen(struct nouveau_mxm *mxm)
64{
65 return *(u16 *)&mxms_data(mxm)[6];
66}
67
68bool
69mxms_checksum(struct nouveau_mxm *mxm)
70{
71 u16 size = mxms_headerlen(mxm) + mxms_structlen(mxm);
72 u8 *mxms = mxms_data(mxm), sum = 0;
73 while (size--)
74 sum += *mxms++;
75 if (sum) {
76 nv_debug(mxm, "checksum invalid\n");
77 return false;
78 }
79 return true;
80}
81
82bool
83mxms_valid(struct nouveau_mxm *mxm)
84{
85 u8 *mxms = mxms_data(mxm);
86 if (*(u32 *)mxms != 0x5f4d584d) {
87 nv_debug(mxm, "signature invalid\n");
88 return false;
89 }
90
91 if (!mxms_version(mxm) || !mxms_checksum(mxm))
92 return false;
93
94 return true;
95}
96
97bool
98mxms_foreach(struct nouveau_mxm *mxm, u8 types,
99 bool (*exec)(struct nouveau_mxm *, u8 *, void *), void *info)
100{
101 u8 *mxms = mxms_data(mxm);
102 u8 *desc = mxms + mxms_headerlen(mxm);
103 u8 *fini = desc + mxms_structlen(mxm) - 1;
104 while (desc < fini) {
105 u8 type = desc[0] & 0x0f;
106 u8 headerlen = 0;
107 u8 recordlen = 0;
108 u8 entries = 0;
109
110 switch (type) {
111 case 0: /* Output Device Structure */
112 if (mxms_version(mxm) >= 0x0300)
113 headerlen = 8;
114 else
115 headerlen = 6;
116 break;
117 case 1: /* System Cooling Capability Structure */
118 case 2: /* Thermal Structure */
119 case 3: /* Input Power Structure */
120 headerlen = 4;
121 break;
122 case 4: /* GPIO Device Structure */
123 headerlen = 4;
124 recordlen = 2;
125 entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
126 break;
127 case 5: /* Vendor Specific Structure */
128 headerlen = 8;
129 break;
130 case 6: /* Backlight Control Structure */
131 if (mxms_version(mxm) >= 0x0300) {
132 headerlen = 4;
133 recordlen = 8;
134 entries = (desc[1] & 0xf0) >> 4;
135 } else {
136 headerlen = 8;
137 }
138 break;
139 case 7: /* Fan Control Structure */
140 headerlen = 8;
141 recordlen = 4;
142 entries = desc[1] & 0x07;
143 break;
144 default:
145 nv_debug(mxm, "unknown descriptor type %d\n", type);
146 return false;
147 }
148
149 if (nv_subdev(mxm)->debug >= NV_DBG_DEBUG && (exec == NULL)) {
150 static const char * mxms_desc_name[] = {
151 "ODS", "SCCS", "TS", "IPS",
152 "GSD", "VSS", "BCS", "FCS",
153 };
154 u8 *dump = desc;
155 int i, j;
156
157 nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
158 for (j = headerlen - 1; j >= 0; j--)
159 printk("%02x", dump[j]);
160 printk("\n");
161 dump += headerlen;
162
163 for (i = 0; i < entries; i++, dump += recordlen) {
164 nv_debug(mxm, " ");
165 for (j = recordlen - 1; j >= 0; j--)
166 printk("%02x", dump[j]);
167 printk("\n");
168 }
169 }
170
171 if (types & (1 << type)) {
172 if (!exec(mxm, desc, info))
173 return false;
174 }
175
176 desc += headerlen + (entries * recordlen);
177 }
178
179 return true;
180}
181
182void
183mxms_output_device(struct nouveau_mxm *mxm, u8 *pdata, struct mxms_odev *desc)
184{
185 u64 data = ROM32(pdata[0]);
186 if (mxms_version(mxm) >= 0x0300)
187 data |= (u64)ROM16(pdata[4]) << 32;
188
189 desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
190 desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
191 desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
192 desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
193}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h
new file mode 100644
index 000000000000..5e0be0c591ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h
@@ -0,0 +1,22 @@
1#ifndef __NVMXM_MXMS_H__
2#define __NVMXM_MXMS_H__
3
4struct mxms_odev {
5 u8 outp_type;
6 u8 conn_type;
7 u8 ddc_port;
8 u8 dig_conn;
9};
10
11void mxms_output_device(struct nouveau_mxm *, u8 *, struct mxms_odev *);
12
13u16 mxms_version(struct nouveau_mxm *);
14u16 mxms_headerlen(struct nouveau_mxm *);
15u16 mxms_structlen(struct nouveau_mxm *);
16bool mxms_checksum(struct nouveau_mxm *);
17bool mxms_valid(struct nouveau_mxm *);
18
19bool mxms_foreach(struct nouveau_mxm *, u8,
20 bool (*)(struct nouveau_mxm *, u8 *, void *), void *);
21
22#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
new file mode 100644
index 000000000000..af129c2e8113
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
@@ -0,0 +1,233 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mxm.h>
26#include <subdev/bios.h>
27#include <subdev/bios/conn.h>
28#include <subdev/bios/dcb.h>
29#include <subdev/bios/mxm.h>
30
31#include "mxms.h"
32
struct nv50_mxm_priv {
	struct nouveau_mxm base;	/* must stay first: objects are cast to/from base */
};

/* state shared between the mxms_foreach() callbacks while matching a
 * dcb entry against the MXM-SIS output device descriptors
 */
struct context {
	u32 *outp;		/* the dcb entry being matched (two u32 words) */
	struct mxms_odev desc;	/* most recently decoded output device */
};
41
42static bool
43mxm_match_tmds_partner(struct nouveau_mxm *mxm, u8 *data, void *info)
44{
45 struct context *ctx = info;
46 struct mxms_odev desc;
47
48 mxms_output_device(mxm, data, &desc);
49 if (desc.outp_type == 2 &&
50 desc.dig_conn == ctx->desc.dig_conn)
51 return false;
52 return true;
53}
54
/* mxms_foreach() callback: test whether the MXM output device descriptor
 * in 'data' corresponds to the dcb entry held in ctx->outp.  Returns false
 * (stop the walk) on a match, true to keep looking.  On a match the raw
 * descriptor's type field is rewritten to mark it as accounted for.
 */
static bool
mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
{
	struct nouveau_bios *bios = nouveau_bios(mxm);
	struct context *ctx = info;
	u64 desc = *(u64 *)data;

	mxms_output_device(mxm, data, &ctx->desc);

	/* match dcb encoder type to mxm-ods device type */
	if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
		return true;

	/* digital output, have some extra stuff to match here, there's a
	 * table in the vbios that provides a mapping from the mxm digital
	 * connection enum values to SOR/link
	 */
	if ((desc & 0x00000000000000f0) >= 0x20) {
		/* check against sor index */
		u8 link = mxm_sor_map(bios, ctx->desc.dig_conn);
		if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
			return true;

		/* check dcb entry has a compatible link field */
		link = (link & 0x30) >> 4;
		if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
			return true;
	}

	/* mark this descriptor accounted for by setting invalid device type,
	 * except of course some manufacturers don't follow specs properly and
	 * we need to avoid killing off the TMDS function on DP connectors
	 * if MXM-SIS is missing an entry for it.
	 */
	data[0] &= ~0xf0;
	if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
	    mxms_foreach(mxm, 0x01, mxm_match_tmds_partner, ctx)) {
		data[0] |= 0x20; /* modify descriptor to match TMDS now */
	} else {
		data[0] |= 0xf0;
	}

	return false;
}
99
/* dcb_outp_foreach() callback: reconcile one dcb output entry with the
 * MXM-SIS data.  Entries without a matching output device descriptor are
 * disabled; matched entries get their ddc/aux port, sorconf.link and
 * connector type overridden with what the MXM data says.
 */
static int
mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
{
	struct nouveau_mxm *mxm = nouveau_mxm(bios);
	struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
	u8 type, i2cidx, link, ver, len;
	u8 *conn;

	/* look for an output device structure that matches this dcb entry.
	 * if one isn't found, disable it.
	 */
	if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
		nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n",
			idx, ctx.outp[0], ctx.outp[1]);
		/* type 0x0f in dcb 4.x marks an invalid/unused entry */
		ctx.outp[0] |= 0x0000000f;
		return 0;
	}

	/* modify the output's ddc/aux port, there's a pointer to a table
	 * with the mapping from mxm ddc/aux port to dcb i2c_index in the
	 * vbios mxm table
	 */
	i2cidx = mxm_ddc_map(bios, ctx.desc.ddc_port);
	if ((ctx.outp[0] & 0x0000000f) != DCB_OUTPUT_DP)
		i2cidx = (i2cidx & 0x0f) << 4;	/* ddc port in high nibble */
	else
		i2cidx = (i2cidx & 0xf0);	/* aux port already in place */

	if (i2cidx != 0xf0) {	/* 0xf0 == no valid mapping, leave untouched */
		ctx.outp[0] &= ~0x000000f0;
		ctx.outp[0] |= i2cidx;
	}

	/* override dcb sorconf.link, based on what mxm data says */
	switch (ctx.desc.outp_type) {
	case 0x00: /* Analog CRT */
	case 0x01: /* Analog TV/HDTV */
		break;
	default:
		link = mxm_sor_map(bios, ctx.desc.dig_conn) & 0x30;
		ctx.outp[1] &= ~0x00000030;
		ctx.outp[1] |= link;
		break;
	}

	/* we may need to fixup various other vbios tables based on what
	 * the descriptor says the connector type should be.
	 *
	 * in a lot of cases, the vbios tables will claim DVI-I is possible,
	 * and the mxm data says the connector is really HDMI. another
	 * common example is DP->eDP.
	 */
	conn = bios->data;
	conn += dcb_conn(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len);
	type = conn[0];
	switch (ctx.desc.conn_type) {
	case 0x01: /* LVDS */
		ctx.outp[1] |= 0x00000004; /* use_power_scripts */
		/* XXX: modify default link width in LVDS table */
		break;
	case 0x02: /* HDMI */
		type = DCB_CONNECTOR_HDMI_1;
		break;
	case 0x03: /* DVI-D */
		type = DCB_CONNECTOR_DVI_D;
		break;
	case 0x0e: /* eDP, falls through to DPint */
		ctx.outp[1] |= 0x00010000;
		/* fallthrough */
	case 0x07: /* DP internal, wtf is this?? HP8670w */
		ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
		type = DCB_CONNECTOR_eDP;
		break;
	default:
		break;
	}

	if (mxms_version(mxm) >= 0x0300)
		conn[0] = type;

	return 0;
}
181
182static bool
183mxm_show_unmatched(struct nouveau_mxm *mxm, u8 *data, void *info)
184{
185 u64 desc = *(u64 *)data;
186 if ((desc & 0xf0) != 0xf0)
187 nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
188 return true;
189}
190
191static void
192mxm_dcb_sanitise(struct nouveau_mxm *mxm)
193{
194 struct nouveau_bios *bios = nouveau_bios(mxm);
195 u8 ver, hdr, cnt, len;
196 u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
197 if (dcb == 0x0000 || ver != 0x40) {
198 nv_debug(mxm, "unsupported DCB version\n");
199 return;
200 }
201
202 dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry);
203 mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
204}
205
/* Subdev constructor: create the base mxm object and, if MXM-SIS shadowing
 * flagged the dcb as needing it, sanitise the vbios dcb table.
 */
static int
nv50_mxm_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nv50_mxm_priv *priv;
	int ret;

	ret = nouveau_mxm_create(parent, engine, oclass, &priv);
	/* published before the error check so the core can destroy a
	 * partially-constructed object */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	if (priv->base.action & MXM_SANITISE_DCB)
		mxm_dcb_sanitise(&priv->base);
	return 0;
}
223
/* nv50 MXM subdev class: custom ctor, default dtor/init/fini */
struct nouveau_oclass
nv50_mxm_oclass = {
	.handle = NV_SUBDEV(MXM, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_mxm_ctor,
		.dtor = _nouveau_mxm_dtor,
		.init = _nouveau_mxm_init,
		.fini = _nouveau_mxm_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
new file mode 100644
index 000000000000..1674c74a76c8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -0,0 +1,144 @@
1/*
2 * Copyright 2012 The Nouveau community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <core/object.h>
26#include <core/device.h>
27
28#include <subdev/bios.h>
29
30#include "priv.h"
31
32int
33nouveau_therm_attr_get(struct nouveau_therm *therm,
34 enum nouveau_therm_attr_type type)
35{
36 struct nouveau_therm_priv *priv = (void *)therm;
37
38 switch (type) {
39 case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
40 return priv->bios_fan.min_duty;
41 case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
42 return priv->bios_fan.max_duty;
43 case NOUVEAU_THERM_ATTR_FAN_MODE:
44 return priv->fan.mode;
45 case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
46 return priv->bios_sensor.thrs_fan_boost.temp;
47 case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
48 return priv->bios_sensor.thrs_fan_boost.hysteresis;
49 case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
50 return priv->bios_sensor.thrs_down_clock.temp;
51 case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
52 return priv->bios_sensor.thrs_down_clock.hysteresis;
53 case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
54 return priv->bios_sensor.thrs_critical.temp;
55 case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
56 return priv->bios_sensor.thrs_critical.hysteresis;
57 case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
58 return priv->bios_sensor.thrs_shutdown.temp;
59 case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
60 return priv->bios_sensor.thrs_shutdown.hysteresis;
61 }
62
63 return -EINVAL;
64}
65
66int
67nouveau_therm_attr_set(struct nouveau_therm *therm,
68 enum nouveau_therm_attr_type type, int value)
69{
70 struct nouveau_therm_priv *priv = (void *)therm;
71
72 switch (type) {
73 case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
74 if (value < 0)
75 value = 0;
76 if (value > priv->bios_fan.max_duty)
77 value = priv->bios_fan.max_duty;
78 priv->bios_fan.min_duty = value;
79 return 0;
80 case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
81 if (value < 0)
82 value = 0;
83 if (value < priv->bios_fan.min_duty)
84 value = priv->bios_fan.min_duty;
85 priv->bios_fan.max_duty = value;
86 return 0;
87 case NOUVEAU_THERM_ATTR_FAN_MODE:
88 return nouveau_therm_fan_set_mode(therm, value);
89 case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
90 priv->bios_sensor.thrs_fan_boost.temp = value;
91 return 0;
92 case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
93 priv->bios_sensor.thrs_fan_boost.hysteresis = value;
94 return 0;
95 case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
96 priv->bios_sensor.thrs_down_clock.temp = value;
97 return 0;
98 case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
99 priv->bios_sensor.thrs_down_clock.hysteresis = value;
100 return 0;
101 case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
102 priv->bios_sensor.thrs_critical.temp = value;
103 return 0;
104 case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
105 priv->bios_sensor.thrs_critical.hysteresis = value;
106 return 0;
107 case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
108 priv->bios_sensor.thrs_shutdown.temp = value;
109 return 0;
110 case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
111 priv->bios_sensor.thrs_shutdown.hysteresis = value;
112 return 0;
113 }
114
115 return -EINVAL;
116}
117
118int
119nouveau_therm_init(struct nouveau_object *object)
120{
121 struct nouveau_therm *therm = (void *)object;
122 struct nouveau_therm_priv *priv = (void *)therm;
123 int ret;
124
125 ret = nouveau_subdev_init(&therm->base);
126 if (ret)
127 return ret;
128
129 if (priv->fan.percent >= 0)
130 therm->fan_set(therm, priv->fan.percent);
131
132 return 0;
133}
134
135int
136nouveau_therm_fini(struct nouveau_object *object, bool suspend)
137{
138 struct nouveau_therm *therm = (void *)object;
139 struct nouveau_therm_priv *priv = (void *)therm;
140
141 priv->fan.percent = therm->fan_get(therm);
142
143 return nouveau_subdev_fini(&therm->base, suspend);
144}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
new file mode 100644
index 000000000000..b29237970fa0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -0,0 +1,234 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 * Martin Peres
24 */
25
26#include "priv.h"
27
28#include <core/object.h>
29#include <core/device.h>
30#include <subdev/gpio.h>
31#include <subdev/timer.h>
32
/* Read the current fan speed as a percentage of full duty.
 *
 * Uses the chipset's pwm_get() hook on the PWM_FAN gpio line; if the PWM
 * divisor reads back as zero, falls back to reporting the raw gpio level
 * as 0% or 100%.  Returns -ENODEV when no pwm_get hook or no PWM_FAN
 * gpio exists.
 */
int
nouveau_therm_fan_get(struct nouveau_therm *therm)
{
	struct nouveau_therm_priv *priv = (void *)therm;
	struct nouveau_gpio *gpio = nouveau_gpio(therm);
	struct dcb_gpio_func func;
	int card_type = nv_device(therm)->card_type;
	u32 divs, duty;
	int ret;

	if (!priv->fan.pwm_get)
		return -ENODEV;

	ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
	if (ret == 0) {
		ret = priv->fan.pwm_get(therm, func.line, &divs, &duty);
		if (ret == 0 && divs) {
			divs = max(divs, duty);	/* guard against duty > divisor */
			/* NOTE(review): pre-nv50 boards and gpios with log[0]
			 * bit 0 set appear to use an inverted duty cycle;
			 * mirrors nouveau_therm_fan_set() — confirm against
			 * vbios gpio table docs */
			if (card_type <= NV_40 || (func.log[0] & 1))
				duty = divs - duty;
			return (duty * 100) / divs;
		}

		/* no working PWM: report on/off state of the gpio */
		return gpio->get(gpio, 0, func.func, func.line) * 100;
	}

	return -ENODEV;
}
61
/* Program the fan to the given duty percentage.
 *
 * The requested value is clamped to the vbios min/max duty range.  The
 * PWM divisor comes from the thermal table frequency (divided out of the
 * chipset's PWM source clock) or, failing that, the perf table divisor.
 * Returns 0 on success, -EINVAL when fan control is disabled, -ENODEV
 * when there is no pwm_set hook or PWM_FAN gpio.
 */
int
nouveau_therm_fan_set(struct nouveau_therm *therm, int percent)
{
	struct nouveau_therm_priv *priv = (void *)therm;
	struct nouveau_gpio *gpio = nouveau_gpio(therm);
	struct dcb_gpio_func func;
	int card_type = nv_device(therm)->card_type;
	u32 divs, duty;
	int ret;

	if (priv->fan.mode == FAN_CONTROL_NONE)
		return -EINVAL;

	if (!priv->fan.pwm_set)
		return -ENODEV;

	if (percent < priv->bios_fan.min_duty)
		percent = priv->bios_fan.min_duty;
	if (percent > priv->bios_fan.max_duty)
		percent = priv->bios_fan.max_duty;

	ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
	if (ret == 0) {
		divs = priv->bios_perf_fan.pwm_divisor;
		if (priv->bios_fan.pwm_freq) {
			/* derive the divisor from the requested pwm frequency */
			divs = 1;
			if (priv->fan.pwm_clock)
				divs = priv->fan.pwm_clock(therm);
			divs /= priv->bios_fan.pwm_freq;
		}

		/* round the duty cycle up so a non-zero percent never
		 * produces a zero duty */
		duty = ((divs * percent) + 99) / 100;
		/* NOTE(review): inverted duty for pre-nv50 or log[0] bit 0;
		 * mirrors nouveau_therm_fan_get() */
		if (card_type <= NV_40 || (func.log[0] & 1))
			duty = divs - duty;

		ret = priv->fan.pwm_set(therm, func.line, divs, duty);
		return ret;
	}

	return -ENODEV;
}
103
/* Measure the fan tachometer and return a speed in RPM (0 if the fan is
 * stopped or too slow, -ENODEV without a FAN_SENSE gpio).
 */
int
nouveau_therm_fan_sense(struct nouveau_therm *therm)
{
	struct nouveau_timer *ptimer = nouveau_timer(therm);
	struct nouveau_gpio *gpio = nouveau_gpio(therm);
	struct dcb_gpio_func func;
	u32 cycles, cur, prev;
	u64 start, end, tach;

	if (gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &func))
		return -ENODEV;

	/* Time a complete rotation and extrapolate to RPM:
	 * When the fan spins, it changes the value of GPIO FAN_SENSE.
	 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
	 */
	start = ptimer->read(ptimer);
	prev = gpio->get(gpio, 0, func.func, func.line);
	cycles = 0;
	do {
		usleep_range(500, 1000); /* supports 0 < rpm < 7500 */

		cur = gpio->get(gpio, 0, func.func, func.line);
		if (prev != cur) {
			/* NOTE(review): start is assigned before the loop, so
			 * this re-arm only triggers if the timer read was 0;
			 * timing thus begins before the first edge — confirm
			 * whether start should be zeroed above instead */
			if (!start)
				start = ptimer->read(ptimer);
			cycles++;
			prev = cur;
		}
	/* give up after 250ms if the fan isn't producing edges */
	} while (cycles < 5 && ptimer->read(ptimer) - start < 250000000);
	end = ptimer->read(ptimer);

	if (cycles == 5) {
		/* 60e9 ns per minute / elapsed ns -> rotations per minute */
		tach = (u64)60000000000;
		do_div(tach, (end - start));
		return tach;
	} else
		return 0;
}
143
144int
145nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
146 enum nouveau_therm_fan_mode mode)
147{
148 struct nouveau_therm_priv *priv = (void *)therm;
149
150 if (priv->fan.mode == mode)
151 return 0;
152
153 if (mode < FAN_CONTROL_NONE || mode >= FAN_CONTROL_NR)
154 return -EINVAL;
155
156 switch (mode)
157 {
158 case FAN_CONTROL_NONE:
159 nv_info(therm, "switch fan to no-control mode\n");
160 break;
161 case FAN_CONTROL_MANUAL:
162 nv_info(therm, "switch fan to manual mode\n");
163 break;
164 case FAN_CONTROL_NR:
165 break;
166 }
167
168 priv->fan.mode = mode;
169 return 0;
170}
171
/* Userspace-facing fan speed read: no mode restriction on reads. */
int
nouveau_therm_fan_user_get(struct nouveau_therm *therm)
{
	return nouveau_therm_fan_get(therm);
}
177
178int
179nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent)
180{
181 struct nouveau_therm_priv *priv = (void *)therm;
182
183 if (priv->fan.mode != FAN_CONTROL_MANUAL)
184 return -EINVAL;
185
186 return nouveau_therm_fan_set(therm, percent);
187}
188
189void
190nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
191{
192 struct nouveau_therm_priv *priv = (void *)therm;
193
194 priv->bios_fan.pwm_freq = 0;
195 priv->bios_fan.min_duty = 0;
196 priv->bios_fan.max_duty = 100;
197}
198
199
200static void
201nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
202{
203 struct nouveau_therm_priv *priv = (void *)therm;
204
205 if (priv->bios_fan.min_duty > 100)
206 priv->bios_fan.min_duty = 100;
207 if (priv->bios_fan.max_duty > 100)
208 priv->bios_fan.max_duty = 100;
209
210 if (priv->bios_fan.min_duty > priv->bios_fan.max_duty)
211 priv->bios_fan.min_duty = priv->bios_fan.max_duty;
212}
213
/* Placeholder pwm_clock hook for chipsets without a known PWM source
 * clock; returning 1 keeps the divisor math in nouveau_therm_fan_set()
 * well-defined.
 */
int nouveau_fan_pwm_clock_dummy(struct nouveau_therm *therm)
{
	return 1;
}
218
/* Fan sub-constructor: load fan parameters from the vbios (perf and
 * thermal tables), falling back to defaults, then start with fan
 * control disabled.  Always returns 0.
 */
int
nouveau_therm_fan_ctor(struct nouveau_therm *therm)
{
	struct nouveau_therm_priv *priv = (void *)therm;
	struct nouveau_bios *bios = nouveau_bios(therm);

	nouveau_therm_fan_set_defaults(therm);
	nvbios_perf_fan_parse(bios, &priv->bios_perf_fan);
	if (nvbios_therm_fan_parse(bios, &priv->bios_fan))
		nv_error(therm, "parsing the thermal table failed\n");
	/* clamp whatever the tables gave us into a sane 0-100 range */
	nouveau_therm_fan_safety_checks(therm);

	nouveau_therm_fan_set_mode(therm, FAN_CONTROL_NONE);

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
new file mode 100644
index 000000000000..e512ff0aae60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -0,0 +1,116 @@
1/*
2 * Copyright 2012 Nouveau community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include "priv.h"
26
27#include <subdev/i2c.h>
28#include <subdev/bios/extdev.h>
29
/* i2c identify() callback: try to instantiate a hwmon client for the
 * candidate board info, keeping it only if a driver binds and its
 * detect() confirms the chip.  Returns true on success (stops the probe
 * loop), false otherwise.
 */
static bool
probe_monitoring_device(struct nouveau_i2c_port *i2c,
			struct i2c_board_info *info)
{
	struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c->i2c);
	struct i2c_client *client;

	/* make sure the hwmon driver module is loaded before binding */
	request_module("%s%s", I2C_MODULE_PREFIX, info->type);

	client = i2c_new_device(&i2c->adapter, info);
	if (!client)
		return false;

	/* drop the device again if no driver bound or detection failed */
	if (!client->driver || client->driver->detect(client, info)) {
		i2c_unregister_device(client);
		return false;
	}

	nv_info(priv,
		"Found an %s at address 0x%x (controlled by lm_sensors)\n",
		info->type, info->addr);
	priv->ic = client;

	return true;
}
55
/* Locate the board's external i2c monitoring chip.
 *
 * Vbios EXTDEV entries (LM89, ADT7473) are tried first at their declared
 * addresses; if neither matches, a static list of known monitoring chips
 * is probed.  On success priv->ic holds the bound i2c client.
 */
void
nouveau_therm_ic_ctor(struct nouveau_therm *therm)
{
	struct nouveau_therm_priv *priv = (void *)therm;
	struct nouveau_bios *bios = nouveau_bios(therm);
	struct nouveau_i2c *i2c = nouveau_i2c(therm);
	struct nvbios_extdev_func extdev_entry;
	/* fallback probe list: known chips at their usual addresses */
	struct i2c_board_info info[] = {
		{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
		{ I2C_BOARD_INFO("w83781d", 0x2d) },
		{ I2C_BOARD_INFO("adt7473", 0x2e) },
		{ I2C_BOARD_INFO("adt7473", 0x2d) },
		{ I2C_BOARD_INFO("adt7473", 0x2c) },
		{ I2C_BOARD_INFO("f75375", 0x2e) },
		{ I2C_BOARD_INFO("lm99", 0x4c) },
		{ I2C_BOARD_INFO("lm90", 0x4c) },
		{ I2C_BOARD_INFO("lm90", 0x4d) },
		{ I2C_BOARD_INFO("adm1021", 0x18) },
		{ I2C_BOARD_INFO("adm1021", 0x19) },
		{ I2C_BOARD_INFO("adm1021", 0x1a) },
		{ I2C_BOARD_INFO("adm1021", 0x29) },
		{ I2C_BOARD_INFO("adm1021", 0x2a) },
		{ I2C_BOARD_INFO("adm1021", 0x2b) },
		{ I2C_BOARD_INFO("adm1021", 0x4c) },
		{ I2C_BOARD_INFO("adm1021", 0x4d) },
		{ I2C_BOARD_INFO("adm1021", 0x4e) },
		{ I2C_BOARD_INFO("lm63", 0x18) },
		{ I2C_BOARD_INFO("lm63", 0x4e) },
		{ }
	};

	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
		/* extdev addresses are 8-bit; i2c wants the 7-bit form */
		struct i2c_board_info board[] = {
			{ I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) },
			{ }
		};

		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
			      board, probe_monitoring_device);
		if (priv->ic)
			return;
	}

	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
		struct i2c_board_info board[] = {
			{ I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) },
			{ }
		};

		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
			      board, probe_monitoring_device);
		if (priv->ic)
			return;
	}

	/* The vbios doesn't provide the address of an existing monitoring
	   device. Let's try our static list.
	 */
	i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", info,
		      probe_monitoring_device);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
new file mode 100644
index 000000000000..fcf2cfe731d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -0,0 +1,163 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 * Martin Peres
24 */
25
26#include "priv.h"
27
/* Enable ADC readout and disable the ALARM threshold, returning the raw
 * sensor value.  Chipsets >= 0x46 use a 14-bit reading, older ones 8-bit.
 */
static int
nv40_sensor_setup(struct nouveau_therm *therm)
{
	struct nouveau_device *device = nv_device(therm);

	/* enable ADC readout and disable the ALARM threshold */
	if (device->chipset >= 0x46) {
		nv_mask(therm, 0x15b8, 0x80000000, 0);
		nv_wr32(therm, 0x15b0, 0x80003fff);
		return nv_rd32(therm, 0x15b4) & 0x3fff;
	} else {
		nv_wr32(therm, 0x15b0, 0xff);
		return nv_rd32(therm, 0x15b4) & 0xff;
	}
}
43
/* Read the core temperature: raw ADC value calibrated with the vbios
 * sensor table's slope/offset parameters.
 */
static int
nv40_temp_get(struct nouveau_therm *therm)
{
	struct nouveau_therm_priv *priv = (void *)therm;
	struct nouveau_device *device = nv_device(therm);
	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
	int core_temp;

	if (device->chipset >= 0x46) {
		nv_wr32(therm, 0x15b0, 0x80003fff);
		core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
	} else {
		nv_wr32(therm, 0x15b0, 0xff);
		core_temp = nv_rd32(therm, 0x15b4) & 0xff;
	}

	/* Setup the sensor if the temperature is 0 */
	if (core_temp == 0)
		core_temp = nv40_sensor_setup(therm);

	/* guard the calibration math against zero divisors / multipliers
	 * from a missing or bogus sensor table */
	if (sensor->slope_div == 0)
		sensor->slope_div = 1;
	if (sensor->offset_den == 0)
		sensor->offset_den = 1;
	if (sensor->slope_mult < 1)
		sensor->slope_mult = 1;

	core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
	core_temp = core_temp + sensor->offset_num / sensor->offset_den;
	/* NOTE(review): the fixed "- 8" adjustment is undocumented here —
	 * presumably an empirical calibration constant; confirm */
	core_temp = core_temp + sensor->offset_constant - 8;

	return core_temp;
}
77
78int
79nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
80{
81 if (line == 2) {
82 u32 reg = nv_rd32(therm, 0x0010f0);
83 if (reg & 0x80000000) {
84 *duty = (reg & 0x7fff0000) >> 16;
85 *divs = (reg & 0x00007fff);
86 return 0;
87 }
88 } else
89 if (line == 9) {
90 u32 reg = nv_rd32(therm, 0x0015f4);
91 if (reg & 0x80000000) {
92 *divs = nv_rd32(therm, 0x0015f8);
93 *duty = (reg & 0x7fffffff);
94 return 0;
95 }
96 } else {
97 nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
98 return -ENODEV;
99 }
100
101 return -EINVAL;
102}
103
104int
105nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
106{
107 if (line == 2) {
108 nv_wr32(therm, 0x0010f0, 0x80000000 | (duty << 16) | divs);
109 } else
110 if (line == 9) {
111 nv_wr32(therm, 0x0015f8, divs);
112 nv_wr32(therm, 0x0015f4, duty | 0x80000000);
113 } else {
114 nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
115 return -ENODEV;
116 }
117
118 return 0;
119}
120
/* nv40 therm subdev constructor: build the base object, probe external
 * monitoring chips, parse vbios sensor/fan data, and wire up the nv40
 * PWM hooks plus the generic fan/attr entry points.
 */
static int
nv40_therm_ctor(struct nouveau_object *parent,
		struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nouveau_therm_priv *priv;
	struct nouveau_therm *therm;
	int ret;

	ret = nouveau_therm_create(parent, engine, oclass, &priv);
	/* published before the error check so the core can destroy a
	 * partially-constructed object */
	*pobject = nv_object(priv);
	therm = (void *) priv;
	if (ret)
		return ret;

	nouveau_therm_ic_ctor(therm);
	nouveau_therm_sensor_ctor(therm);
	nouveau_therm_fan_ctor(therm);

	/* chipset-specific PWM access */
	priv->fan.pwm_get = nv40_fan_pwm_get;
	priv->fan.pwm_set = nv40_fan_pwm_set;

	/* public therm interface */
	therm->temp_get = nv40_temp_get;
	therm->fan_get = nouveau_therm_fan_user_get;
	therm->fan_set = nouveau_therm_fan_user_set;
	therm->fan_sense = nouveau_therm_fan_sense;
	therm->attr_get = nouveau_therm_attr_get;
	therm->attr_set = nouveau_therm_attr_set;

	return 0;
}
153
/* nv40 therm subdev class: custom ctor, shared init/fini from base.c */
struct nouveau_oclass
nv40_therm_oclass = {
	.handle = NV_SUBDEV(THERM, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_therm_ctor,
		.dtor = _nouveau_therm_dtor,
		.init = nouveau_therm_init,
		.fini = nouveau_therm_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
new file mode 100644
index 000000000000..f87a7a3eb4e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 * Martin Peres
24 */
25
26#include "priv.h"
27
28static int
29pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
30{
31 if (*line == 0x04) {
32 *ctrl = 0x00e100;
33 *line = 4;
34 *indx = 0;
35 } else
36 if (*line == 0x09) {
37 *ctrl = 0x00e100;
38 *line = 9;
39 *indx = 1;
40 } else
41 if (*line == 0x10) {
42 *ctrl = 0x00e28c;
43 *line = 0;
44 *indx = 0;
45 } else {
46 nv_error(therm, "unknown pwm ctrl for gpio %d\n", *line);
47 return -ENODEV;
48 }
49
50 return 0;
51}
52
/* Read back PWM divisor/duty for a fan gpio line.  Returns -EINVAL when
 * the line's enable bit in the ctrl register is clear, or the error from
 * pwm_info() for unknown lines.
 */
int
nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
{
	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
	if (ret)
		return ret;

	if (nv_rd32(therm, ctrl) & (1 << line)) {
		/* one divisor/duty register pair per controller, 8 bytes apart */
		*divs = nv_rd32(therm, 0x00e114 + (id * 8));
		*duty = nv_rd32(therm, 0x00e118 + (id * 8));
		return 0;
	}

	return -EINVAL;
}
68
/* Program PWM divisor/duty for a fan gpio line, enabling the line in the
 * ctrl register (bit 31 of the duty register latches the new value).
 */
int
nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
{
	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
	if (ret)
		return ret;

	/* clear the "disable" bit (bit 16+line), set the enable bit */
	nv_mask(therm, ctrl, 0x00010001 << line, 0x00000001 << line);
	nv_wr32(therm, 0x00e114 + (id * 8), divs);
	nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
	return 0;
}
81
/* Determine the PWM source clock frequency, from which the thermal
 * table's pwm_freq is turned into a divisor.
 */
int
nv50_fan_pwm_clock(struct nouveau_therm *therm)
{
	int chipset = nv_device(therm)->chipset;
	int crystal = nv_device(therm)->crystal;	/* in kHz */
	int pwm_clock;

	/* determine the PWM source clock */
	if (chipset > 0x50 && chipset < 0x94) {
		u8 pwm_div = nv_rd32(therm, 0x410c);
		if (nv_rd32(therm, 0xc040) & 0x800000) {
			/* Use the HOST clock (100 MHz)
			 * NOTE(review): where does this constant (2.4)
			 * come from? */
			pwm_clock = (100000000 >> pwm_div) / 10 / 24;
		} else {
			/* NOTE(review): where does this constant (20)
			 * come from? */
			pwm_clock = (crystal * 1000) >> pwm_div;
			pwm_clock /= 20;
		}
	} else {
		pwm_clock = (crystal * 1000) / 20;
	}

	return pwm_clock;
}
107
/* Read the core temperature sensor register directly; unlike nv40 no
 * calibration is applied here.
 */
int
nv50_temp_get(struct nouveau_therm *therm)
{
	return nv_rd32(therm, 0x20400);
}
113
/* nv50 therm subdev constructor: build the base object, probe external
 * monitoring chips, parse vbios sensor/fan data, and wire up the nv50
 * PWM hooks (including a real pwm_clock) plus the generic entry points.
 */
static int
nv50_therm_ctor(struct nouveau_object *parent,
		struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nouveau_therm_priv *priv;
	struct nouveau_therm *therm;
	int ret;

	ret = nouveau_therm_create(parent, engine, oclass, &priv);
	/* published before the error check so the core can destroy a
	 * partially-constructed object */
	*pobject = nv_object(priv);
	therm = (void *) priv;
	if (ret)
		return ret;

	nouveau_therm_ic_ctor(therm);
	nouveau_therm_sensor_ctor(therm);
	nouveau_therm_fan_ctor(therm);

	/* chipset-specific PWM access */
	priv->fan.pwm_get = nv50_fan_pwm_get;
	priv->fan.pwm_set = nv50_fan_pwm_set;
	priv->fan.pwm_clock = nv50_fan_pwm_clock;

	/* public therm interface */
	therm->temp_get = nv50_temp_get;
	therm->fan_get = nouveau_therm_fan_user_get;
	therm->fan_set = nouveau_therm_fan_user_set;
	therm->fan_sense = nouveau_therm_fan_sense;
	therm->attr_get = nouveau_therm_attr_get;
	therm->attr_set = nouveau_therm_attr_set;

	return 0;
}
147
/* nv50 therm subdev class: custom ctor, shared init/fini from base.c */
struct nouveau_oclass
nv50_therm_oclass = {
	.handle = NV_SUBDEV(THERM, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_therm_ctor,
		.dtor = _nouveau_therm_dtor,
		.init = nouveau_therm_init,
		.fini = nouveau_therm_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
new file mode 100644
index 000000000000..1c3cd6abc36e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright 2012 The Nouveau community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/therm.h>
26
27#include <subdev/bios/extdev.h>
28#include <subdev/bios/perf.h>
29#include <subdev/bios/therm.h>
30
/* Private thermal-subdev state shared by all chipset implementations.
 * 'base' must remain the first member so the public and private
 * structures can be cast into each other. */
struct nouveau_therm_priv {
	struct nouveau_therm base;

	/* bios: thermal tables parsed from the VBIOS */
	struct nvbios_therm_sensor bios_sensor;
	struct nvbios_therm_fan bios_fan;
	struct nvbios_perf_fan bios_perf_fan;

	/* fan priv: control state plus chipset-specific PWM backends */
	struct {
		enum nouveau_therm_fan_mode mode;	/* current control mode */
		int percent;				/* last requested duty cycle */

		int (*pwm_get)(struct nouveau_therm *, int line, u32*, u32*);
		int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
		int (*pwm_clock)(struct nouveau_therm *);
	} fan;

	/* ic: external i2c monitoring chip, if one was detected */
	struct i2c_client *ic;
};
52
/* subdev init/fini entry points (base.c) */
int nouveau_therm_init(struct nouveau_object *object);
int nouveau_therm_fini(struct nouveau_object *object, bool suspend);

/* generic get/set for thermal attributes (thresholds, fan bounds, ...) */
int nouveau_therm_attr_get(struct nouveau_therm *therm,
			   enum nouveau_therm_attr_type type);
int nouveau_therm_attr_set(struct nouveau_therm *therm,
			   enum nouveau_therm_attr_type type, int value);

/* probe for external i2c monitoring devices (ic.c) */
void nouveau_therm_ic_ctor(struct nouveau_therm *therm);

/* parse VBIOS sensor calibration/thresholds (temp.c) */
int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);

/* fan control helpers (fan.c) */
int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
int nouveau_therm_fan_get(struct nouveau_therm *therm);
int nouveau_therm_fan_set(struct nouveau_therm *therm, int percent);
int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent);
int nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
			       enum nouveau_therm_fan_mode mode);


int nouveau_therm_fan_sense(struct nouveau_therm *therm);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
new file mode 100644
index 000000000000..204282301fb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright 2012 The Nouveau community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include "priv.h"
26
27#include <core/object.h>
28#include <core/device.h>
29
30#include <subdev/bios.h>
31
32static void
33nouveau_therm_temp_set_defaults(struct nouveau_therm *therm)
34{
35 struct nouveau_therm_priv *priv = (void *)therm;
36
37 priv->bios_sensor.slope_mult = 1;
38 priv->bios_sensor.slope_div = 1;
39 priv->bios_sensor.offset_num = 0;
40 priv->bios_sensor.offset_den = 1;
41 priv->bios_sensor.offset_constant = 0;
42
43 priv->bios_sensor.thrs_fan_boost.temp = 90;
44 priv->bios_sensor.thrs_fan_boost.hysteresis = 3;
45
46 priv->bios_sensor.thrs_down_clock.temp = 95;
47 priv->bios_sensor.thrs_down_clock.hysteresis = 3;
48
49 priv->bios_sensor.thrs_critical.temp = 105;
50 priv->bios_sensor.thrs_critical.hysteresis = 5;
51
52 priv->bios_sensor.thrs_shutdown.temp = 135;
53 priv->bios_sensor.thrs_shutdown.hysteresis = 5; /*not that it matters */
54}
55
56
57static void
58nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
59{
60 struct nouveau_therm_priv *priv = (void *)therm;
61
62 if (!priv->bios_sensor.slope_div)
63 priv->bios_sensor.slope_div = 1;
64 if (!priv->bios_sensor.offset_den)
65 priv->bios_sensor.offset_den = 1;
66}
67
68int
69nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
70{
71 struct nouveau_therm_priv *priv = (void *)therm;
72 struct nouveau_bios *bios = nouveau_bios(therm);
73
74 nouveau_therm_temp_set_defaults(therm);
75 if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
76 &priv->bios_sensor))
77 nv_error(therm, "nvbios_therm_sensor_parse failed\n");
78 nouveau_therm_temp_safety_checks(therm);
79
80 return 0;
81}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
new file mode 100644
index 000000000000..5d417cc9949b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "subdev/timer.h"
26
27bool
28nouveau_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
29{
30 struct nouveau_timer *ptimer = nouveau_timer(obj);
31 u64 time0;
32
33 time0 = ptimer->read(ptimer);
34 do {
35 if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
36 if ((nv_rd32(obj, addr) & mask) == data)
37 return true;
38 } else {
39 if ((nv_ro32(obj, addr) & mask) == data)
40 return true;
41 }
42 } while (ptimer->read(ptimer) - time0 < nsec);
43
44 return false;
45}
46
47bool
48nouveau_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
49{
50 struct nouveau_timer *ptimer = nouveau_timer(obj);
51 u64 time0;
52
53 time0 = ptimer->read(ptimer);
54 do {
55 if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
56 if ((nv_rd32(obj, addr) & mask) != data)
57 return true;
58 } else {
59 if ((nv_ro32(obj, addr) & mask) != data)
60 return true;
61 }
62 } while (ptimer->read(ptimer) - time0 < nsec);
63
64 return false;
65}
66
67bool
68nouveau_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
69{
70 struct nouveau_timer *ptimer = nouveau_timer(obj);
71 u64 time0;
72
73 time0 = ptimer->read(ptimer);
74 do {
75 if (func(data) == true)
76 return true;
77 } while (ptimer->read(ptimer) - time0 < nsec);
78
79 return false;
80}
81
82void
83nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm)
84{
85 struct nouveau_timer *ptimer = nouveau_timer(obj);
86 ptimer->alarm(ptimer, nsec, alarm);
87}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
new file mode 100644
index 000000000000..49976be4d73b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -0,0 +1,249 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/timer.h>
26
/* PTIMER register offsets (nv04 and newer) */
#define NV04_PTIMER_INTR_0 0x009100
#define NV04_PTIMER_INTR_EN_0 0x009140
#define NV04_PTIMER_NUMERATOR 0x009200
#define NV04_PTIMER_DENOMINATOR 0x009210
#define NV04_PTIMER_TIME_0 0x009400
#define NV04_PTIMER_TIME_1 0x009410
#define NV04_PTIMER_ALARM_0 0x009420

/* per-device timer state; 'base' must remain the first member so the
 * two structures can be cast into each other */
struct nv04_timer_priv {
	struct nouveau_timer base;
	struct list_head alarms;	/* pending alarms, soonest first */
	spinlock_t lock;		/* protects 'alarms' */
};
40
/* Return the current 64-bit PTIMER timestamp.
 *
 * TIME_0/TIME_1 are two separate 32-bit registers, so the high word is
 * re-read after the low word and the pair retried until it is stable,
 * guarding against a carry occurring between the two reads. */
static u64
nv04_timer_read(struct nouveau_timer *ptimer)
{
	struct nv04_timer_priv *priv = (void *)ptimer;
	u32 hi, lo;

	do {
		hi = nv_rd32(priv, NV04_PTIMER_TIME_1);
		lo = nv_rd32(priv, NV04_PTIMER_TIME_0);
	} while (hi != nv_rd32(priv, NV04_PTIMER_TIME_1));

	return ((u64)hi << 32 | lo);
}
54
/* Run all alarms whose time has come, and reprogram the hardware alarm
 * for the earliest remaining one (or mask the interrupt when none
 * remain).  Called from both nv04_timer_alarm() and the PTIMER
 * interrupt handler. */
static void
nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
{
	struct nv04_timer_priv *priv = (void *)ptimer;
	struct nouveau_alarm *alarm, *atemp;
	unsigned long flags;
	LIST_HEAD(exec);

	/* move any due alarms off the pending list */
	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) {
		if (alarm->timestamp <= ptimer->read(ptimer))
			list_move_tail(&alarm->head, &exec);
	}

	/* reschedule interrupt for next alarm time; the list is kept
	 * sorted, so the head entry is always the soonest.
	 * NOTE(review): alarm->timestamp is u64 but ALARM_0 is a 32-bit
	 * register, so the write truncates - confirm this is intended */
	if (!list_empty(&priv->alarms)) {
		alarm = list_first_entry(&priv->alarms, typeof(*alarm), head);
		nv_wr32(priv, NV04_PTIMER_ALARM_0, alarm->timestamp);
		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000001);
	} else {
		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* execute any pending alarm handlers, outside the lock so they
	 * are free to re-arm themselves */
	list_for_each_entry_safe(alarm, atemp, &exec, head) {
		list_del(&alarm->head);
		alarm->func(alarm);
	}
}
86
/* Schedule 'alarm' to fire 'time' nanoseconds from now.  The alarm is
 * linked into the pending list in soonest-first order, then the
 * hardware is (re)programmed via nv04_timer_alarm_trigger(). */
static void
nv04_timer_alarm(struct nouveau_timer *ptimer, u32 time,
		 struct nouveau_alarm *alarm)
{
	struct nv04_timer_priv *priv = (void *)ptimer;
	struct nouveau_alarm *list;
	unsigned long flags;

	alarm->timestamp = ptimer->read(ptimer) + time;

	/* append new alarm to list, in soonest-alarm-first order; when
	 * no later entry exists the loop leaves 'list' at the head
	 * sentinel, so list_add_tail() still appends at the end */
	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(list, &priv->alarms, head) {
		if (list->timestamp > alarm->timestamp)
			break;
	}
	list_add_tail(&alarm->head, &list->head);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* process pending alarms (also programs the hw alarm) */
	nv04_timer_alarm_trigger(ptimer);
}
109
/* PTIMER interrupt handler: dispatch due alarms for the alarm
 * interrupt (bit 0) and log + acknowledge any unexpected bits. */
static void
nv04_timer_intr(struct nouveau_subdev *subdev)
{
	struct nv04_timer_priv *priv = (void *)subdev;
	u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0);

	if (stat & 0x00000001) {
		nv04_timer_alarm_trigger(&priv->base);
		/* ack the alarm interrupt */
		nv_wr32(priv, NV04_PTIMER_INTR_0, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat) {
		nv_error(priv, "unknown stat 0x%08x\n", stat);
		/* ack unknown bits anyway so we don't storm */
		nv_wr32(priv, NV04_PTIMER_INTR_0, stat);
	}
}
127
/* Constructor: create the base timer object and wire up the nv04
 * read/alarm implementations and interrupt handler. */
static int
nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv04_timer_priv *priv;
	int ret;

	ret = nouveau_timer_create(parent, engine, oclass, &priv);
	/* publish before the error check so the core can destroy a
	 * partially-constructed object */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.base.intr = nv04_timer_intr;
	priv->base.read = nv04_timer_read;
	priv->base.alarm = nv04_timer_alarm;

	INIT_LIST_HEAD(&priv->alarms);
	spin_lock_init(&priv->lock);
	return 0;
}
149
150static void
151nv04_timer_dtor(struct nouveau_object *object)
152{
153 struct nv04_timer_priv *priv = (void *)object;
154 return nouveau_timer_destroy(&priv->base);
155}
156
/* Hardware init: program the PTIMER numerator/denominator ratio so the
 * free-running counter effectively ticks in nanoseconds, then clear
 * and mask all timer interrupts.  Falls back to a 1:1 ratio when the
 * input clock cannot be determined. */
static int
nv04_timer_init(struct nouveau_object *object)
{
	struct nouveau_device *device = nv_device(object);
	struct nv04_timer_priv *priv = (void *)object;
	u32 m = 1, f, n, d;
	int ret;

	ret = nouveau_timer_init(&priv->base);
	if (ret)
		return ret;

	/* aim for 31.25MHz, which gives us nanosecond timestamps */
	d = 1000000 / 32;

	/* determine base clock for timer source */
#if 0 /*XXX*/
	if (device->chipset < 0x40) {
		n = nouveau_hw_get_clock(device, PLL_CORE);
	} else
#endif
	if (device->chipset <= 0x40) {
		/*XXX: figure this out */
		f = -1;
		n = 0;
	} else {
		/* scale the crystal frequency up until it is at least
		 * twice the target numerator, tracking the multiplier */
		f = device->crystal;
		n = f;
		while (n < (d * 2)) {
			n += (n / m);
			m++;
		}

		/* NOTE(review): 0x009220 appears to select the input
		 * clock multiplier - confirm against hw documentation */
		nv_wr32(priv, 0x009220, m - 1);
	}

	if (!n) {
		/* unknown clock: keep whatever ratio is already
		 * programmed, or fall back to 1:1 if zero */
		nv_warn(priv, "unknown input clock freq\n");
		if (!nv_rd32(priv, NV04_PTIMER_NUMERATOR) ||
		    !nv_rd32(priv, NV04_PTIMER_DENOMINATOR)) {
			nv_wr32(priv, NV04_PTIMER_NUMERATOR, 1);
			nv_wr32(priv, NV04_PTIMER_DENOMINATOR, 1);
		}
		return 0;
	}

	/* reduce ratio to acceptable values */
	while (((n % 5) == 0) && ((d % 5) == 0)) {
		n /= 5;
		d /= 5;
	}

	while (((n % 2) == 0) && ((d % 2) == 0)) {
		n /= 2;
		d /= 2;
	}

	/* the ratio registers are only 16 bits wide */
	while (n > 0xffff || d > 0xffff) {
		n >>= 1;
		d >>= 1;
	}

	nv_debug(priv, "input frequency : %dHz\n", f);
	nv_debug(priv, "input multiplier: %d\n", m);
	nv_debug(priv, "numerator : 0x%08x\n", n);
	nv_debug(priv, "denominator : 0x%08x\n", d);
	nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);

	nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
	nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
	/* ack and mask all timer interrupts */
	nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
	return 0;
}
231
/* Suspend/teardown: mask the alarm interrupt before handing off to the
 * generic timer fini. */
static int
nv04_timer_fini(struct nouveau_object *object, bool suspend)
{
	struct nv04_timer_priv *priv = (void *)object;
	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
	return nouveau_timer_fini(&priv->base, suspend);
}
239
/* Object class description for the nv04-family PTIMER subdev. */
struct nouveau_oclass
nv04_timer_oclass = {
	.handle = NV_SUBDEV(TIMER, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_timer_ctor,
		.dtor = nv04_timer_dtor,
		.init = nv04_timer_init,
		.fini = nv04_timer_fini,
	}
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 11edd5e91a0a..082c11b75acb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -22,22 +22,24 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/gpuobj.h>
26#include "nouveau_drv.h" 26#include <core/mm.h>
27#include "nouveau_mm.h" 27
28#include "nouveau_vm.h" 28#include <subdev/fb.h>
29#include <subdev/vm.h>
29 30
30void 31void
31nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node) 32nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
32{ 33{
33 struct nouveau_vm *vm = vma->vm; 34 struct nouveau_vm *vm = vma->vm;
35 struct nouveau_vmmgr *vmm = vm->vmm;
34 struct nouveau_mm_node *r; 36 struct nouveau_mm_node *r;
35 int big = vma->node->type != vm->spg_shift; 37 int big = vma->node->type != vmm->spg_shift;
36 u32 offset = vma->node->offset + (delta >> 12); 38 u32 offset = vma->node->offset + (delta >> 12);
37 u32 bits = vma->node->type - 12; 39 u32 bits = vma->node->type - 12;
38 u32 pde = (offset >> vm->pgt_bits) - vm->fpde; 40 u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
39 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; 41 u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
40 u32 max = 1 << (vm->pgt_bits - bits); 42 u32 max = 1 << (vmm->pgt_bits - bits);
41 u32 end, len; 43 u32 end, len;
42 44
43 delta = 0; 45 delta = 0;
@@ -53,7 +55,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
53 end = max; 55 end = max;
54 len = end - pte; 56 len = end - pte;
55 57
56 vm->map(vma, pgt, node, pte, len, phys, delta); 58 vmm->map(vma, pgt, node, pte, len, phys, delta);
57 59
58 num -= len; 60 num -= len;
59 pte += len; 61 pte += len;
@@ -67,7 +69,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
67 } 69 }
68 } 70 }
69 71
70 vm->flush(vm); 72 vmm->flush(vm);
71} 73}
72 74
73void 75void
@@ -81,13 +83,14 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
81 struct nouveau_mem *mem) 83 struct nouveau_mem *mem)
82{ 84{
83 struct nouveau_vm *vm = vma->vm; 85 struct nouveau_vm *vm = vma->vm;
84 int big = vma->node->type != vm->spg_shift; 86 struct nouveau_vmmgr *vmm = vm->vmm;
87 int big = vma->node->type != vmm->spg_shift;
85 u32 offset = vma->node->offset + (delta >> 12); 88 u32 offset = vma->node->offset + (delta >> 12);
86 u32 bits = vma->node->type - 12; 89 u32 bits = vma->node->type - 12;
87 u32 num = length >> vma->node->type; 90 u32 num = length >> vma->node->type;
88 u32 pde = (offset >> vm->pgt_bits) - vm->fpde; 91 u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
89 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; 92 u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
90 u32 max = 1 << (vm->pgt_bits - bits); 93 u32 max = 1 << (vmm->pgt_bits - bits);
91 unsigned m, sglen; 94 unsigned m, sglen;
92 u32 end, len; 95 u32 end, len;
93 int i; 96 int i;
@@ -105,7 +108,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
105 for (m = 0; m < len; m++) { 108 for (m = 0; m < len; m++) {
106 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 109 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
107 110
108 vm->map_sg(vma, pgt, mem, pte, 1, &addr); 111 vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
109 num--; 112 num--;
110 pte++; 113 pte++;
111 114
@@ -120,7 +123,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
120 for (; m < sglen; m++) { 123 for (; m < sglen; m++) {
121 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 124 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
122 125
123 vm->map_sg(vma, pgt, mem, pte, 1, &addr); 126 vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
124 num--; 127 num--;
125 pte++; 128 pte++;
126 if (num == 0) 129 if (num == 0)
@@ -130,7 +133,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
130 133
131 } 134 }
132finish: 135finish:
133 vm->flush(vm); 136 vmm->flush(vm);
134} 137}
135 138
136void 139void
@@ -138,14 +141,15 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
138 struct nouveau_mem *mem) 141 struct nouveau_mem *mem)
139{ 142{
140 struct nouveau_vm *vm = vma->vm; 143 struct nouveau_vm *vm = vma->vm;
144 struct nouveau_vmmgr *vmm = vm->vmm;
141 dma_addr_t *list = mem->pages; 145 dma_addr_t *list = mem->pages;
142 int big = vma->node->type != vm->spg_shift; 146 int big = vma->node->type != vmm->spg_shift;
143 u32 offset = vma->node->offset + (delta >> 12); 147 u32 offset = vma->node->offset + (delta >> 12);
144 u32 bits = vma->node->type - 12; 148 u32 bits = vma->node->type - 12;
145 u32 num = length >> vma->node->type; 149 u32 num = length >> vma->node->type;
146 u32 pde = (offset >> vm->pgt_bits) - vm->fpde; 150 u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
147 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; 151 u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
148 u32 max = 1 << (vm->pgt_bits - bits); 152 u32 max = 1 << (vmm->pgt_bits - bits);
149 u32 end, len; 153 u32 end, len;
150 154
151 while (num) { 155 while (num) {
@@ -156,7 +160,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
156 end = max; 160 end = max;
157 len = end - pte; 161 len = end - pte;
158 162
159 vm->map_sg(vma, pgt, mem, pte, len, list); 163 vmm->map_sg(vma, pgt, mem, pte, len, list);
160 164
161 num -= len; 165 num -= len;
162 pte += len; 166 pte += len;
@@ -167,20 +171,21 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
167 } 171 }
168 } 172 }
169 173
170 vm->flush(vm); 174 vmm->flush(vm);
171} 175}
172 176
173void 177void
174nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) 178nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
175{ 179{
176 struct nouveau_vm *vm = vma->vm; 180 struct nouveau_vm *vm = vma->vm;
177 int big = vma->node->type != vm->spg_shift; 181 struct nouveau_vmmgr *vmm = vm->vmm;
182 int big = vma->node->type != vmm->spg_shift;
178 u32 offset = vma->node->offset + (delta >> 12); 183 u32 offset = vma->node->offset + (delta >> 12);
179 u32 bits = vma->node->type - 12; 184 u32 bits = vma->node->type - 12;
180 u32 num = length >> vma->node->type; 185 u32 num = length >> vma->node->type;
181 u32 pde = (offset >> vm->pgt_bits) - vm->fpde; 186 u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
182 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; 187 u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
183 u32 max = 1 << (vm->pgt_bits - bits); 188 u32 max = 1 << (vmm->pgt_bits - bits);
184 u32 end, len; 189 u32 end, len;
185 190
186 while (num) { 191 while (num) {
@@ -191,7 +196,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
191 end = max; 196 end = max;
192 len = end - pte; 197 len = end - pte;
193 198
194 vm->unmap(pgt, pte, len); 199 vmm->unmap(pgt, pte, len);
195 200
196 num -= len; 201 num -= len;
197 pte += len; 202 pte += len;
@@ -201,7 +206,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
201 } 206 }
202 } 207 }
203 208
204 vm->flush(vm); 209 vmm->flush(vm);
205} 210}
206 211
207void 212void
@@ -213,6 +218,7 @@ nouveau_vm_unmap(struct nouveau_vma *vma)
213static void 218static void
214nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde) 219nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
215{ 220{
221 struct nouveau_vmmgr *vmm = vm->vmm;
216 struct nouveau_vm_pgd *vpgd; 222 struct nouveau_vm_pgd *vpgd;
217 struct nouveau_vm_pgt *vpgt; 223 struct nouveau_vm_pgt *vpgt;
218 struct nouveau_gpuobj *pgt; 224 struct nouveau_gpuobj *pgt;
@@ -227,7 +233,7 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
227 vpgt->obj[big] = NULL; 233 vpgt->obj[big] = NULL;
228 234
229 list_for_each_entry(vpgd, &vm->pgd_list, head) { 235 list_for_each_entry(vpgd, &vm->pgd_list, head) {
230 vm->map_pgt(vpgd->obj, pde, vpgt->obj); 236 vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
231 } 237 }
232 238
233 mutex_unlock(&vm->mm.mutex); 239 mutex_unlock(&vm->mm.mutex);
@@ -239,18 +245,19 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
239static int 245static int
240nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type) 246nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
241{ 247{
248 struct nouveau_vmmgr *vmm = vm->vmm;
242 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; 249 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
243 struct nouveau_vm_pgd *vpgd; 250 struct nouveau_vm_pgd *vpgd;
244 struct nouveau_gpuobj *pgt; 251 struct nouveau_gpuobj *pgt;
245 int big = (type != vm->spg_shift); 252 int big = (type != vmm->spg_shift);
246 u32 pgt_size; 253 u32 pgt_size;
247 int ret; 254 int ret;
248 255
249 pgt_size = (1 << (vm->pgt_bits + 12)) >> type; 256 pgt_size = (1 << (vmm->pgt_bits + 12)) >> type;
250 pgt_size *= 8; 257 pgt_size *= 8;
251 258
252 mutex_unlock(&vm->mm.mutex); 259 mutex_unlock(&vm->mm.mutex);
253 ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000, 260 ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
254 NVOBJ_FLAG_ZERO_ALLOC, &pgt); 261 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
255 mutex_lock(&vm->mm.mutex); 262 mutex_lock(&vm->mm.mutex);
256 if (unlikely(ret)) 263 if (unlikely(ret))
@@ -266,7 +273,7 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
266 273
267 vpgt->obj[big] = pgt; 274 vpgt->obj[big] = pgt;
268 list_for_each_entry(vpgd, &vm->pgd_list, head) { 275 list_for_each_entry(vpgd, &vm->pgd_list, head) {
269 vm->map_pgt(vpgd->obj, pde, vpgt->obj); 276 vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
270 } 277 }
271 278
272 return 0; 279 return 0;
@@ -276,23 +283,26 @@ int
276nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, 283nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
277 u32 access, struct nouveau_vma *vma) 284 u32 access, struct nouveau_vma *vma)
278{ 285{
286 struct nouveau_vmmgr *vmm = vm->vmm;
279 u32 align = (1 << page_shift) >> 12; 287 u32 align = (1 << page_shift) >> 12;
280 u32 msize = size >> 12; 288 u32 msize = size >> 12;
281 u32 fpde, lpde, pde; 289 u32 fpde, lpde, pde;
282 int ret; 290 int ret;
283 291
284 mutex_lock(&vm->mm.mutex); 292 mutex_lock(&vm->mm.mutex);
285 ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node); 293 ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align,
294 &vma->node);
286 if (unlikely(ret != 0)) { 295 if (unlikely(ret != 0)) {
287 mutex_unlock(&vm->mm.mutex); 296 mutex_unlock(&vm->mm.mutex);
288 return ret; 297 return ret;
289 } 298 }
290 299
291 fpde = (vma->node->offset >> vm->pgt_bits); 300 fpde = (vma->node->offset >> vmm->pgt_bits);
292 lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; 301 lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
302
293 for (pde = fpde; pde <= lpde; pde++) { 303 for (pde = fpde; pde <= lpde; pde++) {
294 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; 304 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
295 int big = (vma->node->type != vm->spg_shift); 305 int big = (vma->node->type != vmm->spg_shift);
296 306
297 if (likely(vpgt->refcount[big])) { 307 if (likely(vpgt->refcount[big])) {
298 vpgt->refcount[big]++; 308 vpgt->refcount[big]++;
@@ -303,9 +313,8 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
303 if (ret) { 313 if (ret) {
304 if (pde != fpde) 314 if (pde != fpde)
305 nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1); 315 nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
306 nouveau_mm_put(&vm->mm, vma->node); 316 nouveau_mm_free(&vm->mm, &vma->node);
307 mutex_unlock(&vm->mm.mutex); 317 mutex_unlock(&vm->mm.mutex);
308 vma->node = NULL;
309 return ret; 318 return ret;
310 } 319 }
311 } 320 }
@@ -321,91 +330,67 @@ void
321nouveau_vm_put(struct nouveau_vma *vma) 330nouveau_vm_put(struct nouveau_vma *vma)
322{ 331{
323 struct nouveau_vm *vm = vma->vm; 332 struct nouveau_vm *vm = vma->vm;
333 struct nouveau_vmmgr *vmm = vm->vmm;
324 u32 fpde, lpde; 334 u32 fpde, lpde;
325 335
326 if (unlikely(vma->node == NULL)) 336 if (unlikely(vma->node == NULL))
327 return; 337 return;
328 fpde = (vma->node->offset >> vm->pgt_bits); 338 fpde = (vma->node->offset >> vmm->pgt_bits);
329 lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; 339 lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
330 340
331 mutex_lock(&vm->mm.mutex); 341 mutex_lock(&vm->mm.mutex);
332 nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde); 342 nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
333 nouveau_mm_put(&vm->mm, vma->node); 343 nouveau_mm_free(&vm->mm, &vma->node);
334 vma->node = NULL;
335 mutex_unlock(&vm->mm.mutex); 344 mutex_unlock(&vm->mm.mutex);
336} 345}
337 346
338int 347int
339nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, 348nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
340 struct nouveau_vm **pvm) 349 u64 mm_offset, u32 block, struct nouveau_vm **pvm)
341{ 350{
342 struct drm_nouveau_private *dev_priv = dev->dev_private;
343 struct nouveau_vm *vm; 351 struct nouveau_vm *vm;
344 u64 mm_length = (offset + length) - mm_offset; 352 u64 mm_length = (offset + length) - mm_offset;
345 u32 block, pgt_bits;
346 int ret; 353 int ret;
347 354
348 vm = kzalloc(sizeof(*vm), GFP_KERNEL); 355 vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
349 if (!vm) 356 if (!vm)
350 return -ENOMEM; 357 return -ENOMEM;
351 358
352 if (dev_priv->card_type == NV_50) { 359 INIT_LIST_HEAD(&vm->pgd_list);
353 vm->map_pgt = nv50_vm_map_pgt; 360 vm->vmm = vmm;
354 vm->map = nv50_vm_map; 361 vm->refcount = 1;
355 vm->map_sg = nv50_vm_map_sg; 362 vm->fpde = offset >> (vmm->pgt_bits + 12);
356 vm->unmap = nv50_vm_unmap; 363 vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
357 vm->flush = nv50_vm_flush;
358 vm->spg_shift = 12;
359 vm->lpg_shift = 16;
360
361 pgt_bits = 29;
362 block = (1 << pgt_bits);
363 if (length < block)
364 block = length;
365
366 } else
367 if (dev_priv->card_type >= NV_C0) {
368 vm->map_pgt = nvc0_vm_map_pgt;
369 vm->map = nvc0_vm_map;
370 vm->map_sg = nvc0_vm_map_sg;
371 vm->unmap = nvc0_vm_unmap;
372 vm->flush = nvc0_vm_flush;
373 vm->spg_shift = 12;
374 vm->lpg_shift = 17;
375 pgt_bits = 27;
376 block = 4096;
377 } else {
378 kfree(vm);
379 return -ENOSYS;
380 }
381 364
382 vm->fpde = offset >> pgt_bits; 365 vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
383 vm->lpde = (offset + length - 1) >> pgt_bits;
384 vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
385 if (!vm->pgt) { 366 if (!vm->pgt) {
386 kfree(vm); 367 kfree(vm);
387 return -ENOMEM; 368 return -ENOMEM;
388 } 369 }
389 370
390 INIT_LIST_HEAD(&vm->pgd_list);
391 vm->dev = dev;
392 vm->refcount = 1;
393 vm->pgt_bits = pgt_bits - 12;
394
395 ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, 371 ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
396 block >> 12); 372 block >> 12);
397 if (ret) { 373 if (ret) {
374 kfree(vm->pgt);
398 kfree(vm); 375 kfree(vm);
399 return ret; 376 return ret;
400 } 377 }
401 378
402 *pvm = vm;
403 return 0; 379 return 0;
404} 380}
405 381
382int
383nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
384 u64 mm_offset, struct nouveau_vm **pvm)
385{
386 struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
387 return vmm->create(vmm, offset, length, mm_offset, pvm);
388}
389
406static int 390static int
407nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) 391nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
408{ 392{
393 struct nouveau_vmmgr *vmm = vm->vmm;
409 struct nouveau_vm_pgd *vpgd; 394 struct nouveau_vm_pgd *vpgd;
410 int i; 395 int i;
411 396
@@ -420,7 +405,7 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
420 405
421 mutex_lock(&vm->mm.mutex); 406 mutex_lock(&vm->mm.mutex);
422 for (i = vm->fpde; i <= vm->lpde; i++) 407 for (i = vm->fpde; i <= vm->lpde; i++)
423 vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); 408 vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
424 list_add(&vpgd->head, &vm->pgd_list); 409 list_add(&vpgd->head, &vm->pgd_list);
425 mutex_unlock(&vm->mm.mutex); 410 mutex_unlock(&vm->mm.mutex);
426 return 0; 411 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
new file mode 100644
index 000000000000..6adbbc9cc361
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
@@ -0,0 +1,151 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include "nv04.h"
28
29#define NV04_PDMA_SIZE (128 * 1024 * 1024)
30#define NV04_PDMA_PAGE ( 4 * 1024)
31
32/*******************************************************************************
33 * VM map/unmap callbacks
34 ******************************************************************************/
35
36static void
37nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
38 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
39{
40 pte = 0x00008 + (pte * 4);
41 while (cnt) {
42 u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
43 u32 phys = (u32)*list++;
44 while (cnt && page--) {
45 nv_wo32(pgt, pte, phys | 3);
46 phys += NV04_PDMA_PAGE;
47 pte += 4;
48 cnt -= 1;
49 }
50 }
51}
52
53static void
54nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
55{
56 pte = 0x00008 + (pte * 4);
57 while (cnt--) {
58 nv_wo32(pgt, pte, 0x00000000);
59 pte += 4;
60 }
61}
62
63static void
64nv04_vm_flush(struct nouveau_vm *vm)
65{
66}
67
68/*******************************************************************************
69 * VM object
70 ******************************************************************************/
71
72int
73nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
74 struct nouveau_vm **pvm)
75{
76 return -EINVAL;
77}
78
79/*******************************************************************************
80 * VMMGR subdev
81 ******************************************************************************/
82
83static int
84nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
85 struct nouveau_oclass *oclass, void *data, u32 size,
86 struct nouveau_object **pobject)
87{
88 struct nv04_vmmgr_priv *priv;
89 struct nouveau_gpuobj *dma;
90 int ret;
91
92 ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
93 "pcigart", &priv);
94 *pobject = nv_object(priv);
95 if (ret)
96 return ret;
97
98 priv->base.create = nv04_vm_create;
99 priv->base.limit = NV04_PDMA_SIZE;
100 priv->base.dma_bits = 32;
101 priv->base.pgt_bits = 32 - 12;
102 priv->base.spg_shift = 12;
103 priv->base.lpg_shift = 12;
104 priv->base.map_sg = nv04_vm_map_sg;
105 priv->base.unmap = nv04_vm_unmap;
106 priv->base.flush = nv04_vm_flush;
107
108 ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
109 &priv->vm);
110 if (ret)
111 return ret;
112
113 ret = nouveau_gpuobj_new(parent, NULL,
114 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
115 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
116 &priv->vm->pgt[0].obj[0]);
117 dma = priv->vm->pgt[0].obj[0];
118 priv->vm->pgt[0].refcount[0] = 1;
119 if (ret)
120 return ret;
121
122 nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
123 nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
124 return 0;
125}
126
127void
128nv04_vmmgr_dtor(struct nouveau_object *object)
129{
130 struct nv04_vmmgr_priv *priv = (void *)object;
131 if (priv->vm) {
132 nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
133 nouveau_vm_ref(NULL, &priv->vm, NULL);
134 }
135 if (priv->nullp) {
136 pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
137 priv->nullp, priv->null);
138 }
139 nouveau_vmmgr_destroy(&priv->base);
140}
141
142struct nouveau_oclass
143nv04_vmmgr_oclass = {
144 .handle = NV_SUBDEV(VM, 0x04),
145 .ofuncs = &(struct nouveau_ofuncs) {
146 .ctor = nv04_vmmgr_ctor,
147 .dtor = nv04_vmmgr_dtor,
148 .init = _nouveau_vmmgr_init,
149 .fini = _nouveau_vmmgr_fini,
150 },
151};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
new file mode 100644
index 000000000000..ec42d4bc86a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
@@ -0,0 +1,19 @@
1#ifndef __NV04_VMMGR_PRIV__
2#define __NV04_VMMGR_PRIV__
3
4#include <subdev/vm.h>
5
6struct nv04_vmmgr_priv {
7 struct nouveau_vmmgr base;
8 struct nouveau_vm *vm;
9 dma_addr_t null;
10 void *nullp;
11};
12
13static inline struct nv04_vmmgr_priv *
14nv04_vmmgr(void *obj)
15{
16 return (void *)nouveau_vmmgr(obj);
17}
18
19#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
new file mode 100644
index 000000000000..0203e1e12caa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -0,0 +1,158 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26#include <core/option.h>
27
28#include <subdev/timer.h>
29#include <subdev/vm.h>
30
31#include "nv04.h"
32
33#define NV41_GART_SIZE (512 * 1024 * 1024)
34#define NV41_GART_PAGE ( 4 * 1024)
35
36/*******************************************************************************
37 * VM map/unmap callbacks
38 ******************************************************************************/
39
40static void
41nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
42 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
43{
44 pte = pte * 4;
45 while (cnt) {
46 u32 page = PAGE_SIZE / NV41_GART_PAGE;
47 u64 phys = (u64)*list++;
48 while (cnt && page--) {
49 nv_wo32(pgt, pte, (phys >> 7) | 1);
50 phys += NV41_GART_PAGE;
51 pte += 4;
52 cnt -= 1;
53 }
54 }
55}
56
57static void
58nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
59{
60 pte = pte * 4;
61 while (cnt--) {
62 nv_wo32(pgt, pte, 0x00000000);
63 pte += 4;
64 }
65}
66
67static void
68nv41_vm_flush(struct nouveau_vm *vm)
69{
70 struct nv04_vm_priv *priv = (void *)vm->vmm;
71
72 mutex_lock(&nv_subdev(priv)->mutex);
73 nv_wr32(priv, 0x100810, 0x00000022);
74 if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) {
75 nv_warn(priv, "flush timeout, 0x%08x\n",
76 nv_rd32(priv, 0x100810));
77 }
78 nv_wr32(priv, 0x100810, 0x00000000);
79 mutex_unlock(&nv_subdev(priv)->mutex);
80}
81
82/*******************************************************************************
83 * VMMGR subdev
84 ******************************************************************************/
85
86static int
87nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
88 struct nouveau_oclass *oclass, void *data, u32 size,
89 struct nouveau_object **pobject)
90{
91 struct nouveau_device *device = nv_device(parent);
92 struct nv04_vmmgr_priv *priv;
93 int ret;
94
95 if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
96 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
97 data, size, pobject);
98 }
99
100 ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
101 "pciegart", &priv);
102 *pobject = nv_object(priv);
103 if (ret)
104 return ret;
105
106 priv->base.create = nv04_vm_create;
107 priv->base.limit = NV41_GART_SIZE;
108 priv->base.dma_bits = 39;
109 priv->base.pgt_bits = 32 - 12;
110 priv->base.spg_shift = 12;
111 priv->base.lpg_shift = 12;
112 priv->base.map_sg = nv41_vm_map_sg;
113 priv->base.unmap = nv41_vm_unmap;
114 priv->base.flush = nv41_vm_flush;
115
116 ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
117 &priv->vm);
118 if (ret)
119 return ret;
120
121 ret = nouveau_gpuobj_new(parent, NULL,
122 (NV41_GART_SIZE / NV41_GART_PAGE) * 4,
123 16, NVOBJ_FLAG_ZERO_ALLOC,
124 &priv->vm->pgt[0].obj[0]);
125 priv->vm->pgt[0].refcount[0] = 1;
126 if (ret)
127 return ret;
128
129 return 0;
130}
131
132static int
133nv41_vmmgr_init(struct nouveau_object *object)
134{
135 struct nv04_vmmgr_priv *priv = (void *)object;
136 struct nouveau_gpuobj *dma = priv->vm->pgt[0].obj[0];
137 int ret;
138
139 ret = nouveau_vmmgr_init(&priv->base);
140 if (ret)
141 return ret;
142
143 nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
144 nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
145 nv_wr32(priv, 0x100820, 0x00000000);
146 return 0;
147}
148
149struct nouveau_oclass
150nv41_vmmgr_oclass = {
151 .handle = NV_SUBDEV(VM, 0x41),
152 .ofuncs = &(struct nouveau_ofuncs) {
153 .ctor = nv41_vmmgr_ctor,
154 .dtor = nv04_vmmgr_dtor,
155 .init = nv41_vmmgr_init,
156 .fini = _nouveau_vmmgr_fini,
157 },
158};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
new file mode 100644
index 000000000000..0ac18d05a146
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -0,0 +1,248 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26#include <core/option.h>
27
28#include <subdev/timer.h>
29#include <subdev/vm.h>
30
31#include "nv04.h"
32
33#define NV44_GART_SIZE (512 * 1024 * 1024)
34#define NV44_GART_PAGE ( 4 * 1024)
35
36/*******************************************************************************
37 * VM map/unmap callbacks
38 ******************************************************************************/
39
40static void
41nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null,
42 dma_addr_t *list, u32 pte, u32 cnt)
43{
44 u32 base = (pte << 2) & ~0x0000000f;
45 u32 tmp[4];
46
47 tmp[0] = nv_ro32(pgt, base + 0x0);
48 tmp[1] = nv_ro32(pgt, base + 0x4);
49 tmp[2] = nv_ro32(pgt, base + 0x8);
50 tmp[3] = nv_ro32(pgt, base + 0xc);
51
52 while (cnt--) {
53 u32 addr = list ? (*list++ >> 12) : (null >> 12);
54 switch (pte++ & 0x3) {
55 case 0:
56 tmp[0] &= ~0x07ffffff;
57 tmp[0] |= addr;
58 break;
59 case 1:
60 tmp[0] &= ~0xf8000000;
61 tmp[0] |= addr << 27;
62 tmp[1] &= ~0x003fffff;
63 tmp[1] |= addr >> 5;
64 break;
65 case 2:
66 tmp[1] &= ~0xffc00000;
67 tmp[1] |= addr << 22;
68 tmp[2] &= ~0x0001ffff;
69 tmp[2] |= addr >> 10;
70 break;
71 case 3:
72 tmp[2] &= ~0xfffe0000;
73 tmp[2] |= addr << 17;
74 tmp[3] &= ~0x00000fff;
75 tmp[3] |= addr >> 15;
76 break;
77 }
78 }
79
80 nv_wo32(pgt, base + 0x0, tmp[0]);
81 nv_wo32(pgt, base + 0x4, tmp[1]);
82 nv_wo32(pgt, base + 0x8, tmp[2]);
83 nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
84}
85
86static void
87nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
88 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
89{
90 struct nv04_vmmgr_priv *priv = (void *)vma->vm->vmm;
91 u32 tmp[4];
92 int i;
93
94 if (pte & 3) {
95 u32 max = 4 - (pte & 3);
96 u32 part = (cnt > max) ? max : cnt;
97 nv44_vm_fill(pgt, priv->null, list, pte, part);
98 pte += part;
99 list += part;
100 cnt -= part;
101 }
102
103 while (cnt >= 4) {
104 for (i = 0; i < 4; i++)
105 tmp[i] = *list++ >> 12;
106 nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
107 nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
108 nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
109 nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
110 cnt -= 4;
111 }
112
113 if (cnt)
114 nv44_vm_fill(pgt, priv->null, list, pte, cnt);
115}
116
117static void
118nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
119{
120 struct nv04_vmmgr_priv *priv = (void *)nouveau_vmmgr(pgt);
121
122 if (pte & 3) {
123 u32 max = 4 - (pte & 3);
124 u32 part = (cnt > max) ? max : cnt;
125 nv44_vm_fill(pgt, priv->null, NULL, pte, part);
126 pte += part;
127 cnt -= part;
128 }
129
130 while (cnt >= 4) {
131 nv_wo32(pgt, pte++ * 4, 0x00000000);
132 nv_wo32(pgt, pte++ * 4, 0x00000000);
133 nv_wo32(pgt, pte++ * 4, 0x00000000);
134 nv_wo32(pgt, pte++ * 4, 0x00000000);
135 cnt -= 4;
136 }
137
138 if (cnt)
139 nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
140}
141
142static void
143nv44_vm_flush(struct nouveau_vm *vm)
144{
145 struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
146 nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
147 nv_wr32(priv, 0x100808, 0x00000020);
148 if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
149 nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
150 nv_wr32(priv, 0x100808, 0x00000000);
151}
152
153/*******************************************************************************
154 * VMMGR subdev
155 ******************************************************************************/
156
157static int
158nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
159 struct nouveau_oclass *oclass, void *data, u32 size,
160 struct nouveau_object **pobject)
161{
162 struct nouveau_device *device = nv_device(parent);
163 struct nv04_vmmgr_priv *priv;
164 int ret;
165
166 if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
167 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
168 data, size, pobject);
169 }
170
171 ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
172 "pciegart", &priv);
173 *pobject = nv_object(priv);
174 if (ret)
175 return ret;
176
177 priv->base.create = nv04_vm_create;
178 priv->base.limit = NV44_GART_SIZE;
179 priv->base.dma_bits = 39;
180 priv->base.pgt_bits = 32 - 12;
181 priv->base.spg_shift = 12;
182 priv->base.lpg_shift = 12;
183 priv->base.map_sg = nv44_vm_map_sg;
184 priv->base.unmap = nv44_vm_unmap;
185 priv->base.flush = nv44_vm_flush;
186
187 priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
188 if (!priv->nullp) {
189 nv_error(priv, "unable to allocate dummy pages\n");
190 return -ENOMEM;
191 }
192
193 ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
194 &priv->vm);
195 if (ret)
196 return ret;
197
198 ret = nouveau_gpuobj_new(parent, NULL,
199 (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
200 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
201 &priv->vm->pgt[0].obj[0]);
202 priv->vm->pgt[0].refcount[0] = 1;
203 if (ret)
204 return ret;
205
206 return 0;
207}
208
209static int
210nv44_vmmgr_init(struct nouveau_object *object)
211{
212 struct nv04_vmmgr_priv *priv = (void *)object;
213 struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0];
214 u32 addr;
215 int ret;
216
217 ret = nouveau_vmmgr_init(&priv->base);
218 if (ret)
219 return ret;
220
221 /* calculate vram address of this PRAMIN block, object must be
222 * allocated on 512KiB alignment, and not exceed a total size
223 * of 512KiB for this to work correctly
224 */
225 addr = nv_rd32(priv, 0x10020c);
226 addr -= ((gart->addr >> 19) + 1) << 19;
227
228 nv_wr32(priv, 0x100850, 0x80000000);
229 nv_wr32(priv, 0x100818, priv->null);
230 nv_wr32(priv, 0x100804, NV44_GART_SIZE);
231 nv_wr32(priv, 0x100850, 0x00008000);
232 nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
233 nv_wr32(priv, 0x100820, 0x00000000);
234 nv_wr32(priv, 0x10082c, 0x00000001);
235 nv_wr32(priv, 0x100800, addr | 0x00000010);
236 return 0;
237}
238
239struct nouveau_oclass
240nv44_vmmgr_oclass = {
241 .handle = NV_SUBDEV(VM, 0x44),
242 .ofuncs = &(struct nouveau_ofuncs) {
243 .ctor = nv44_vmmgr_ctor,
244 .dtor = nv04_vmmgr_dtor,
245 .init = nv44_vmmgr_init,
246 .fini = _nouveau_vmmgr_fini,
247 },
248};
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
index 179bb42a635c..e067f81c97b3 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -22,12 +22,19 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/device.h>
26#include <core/gpuobj.h>
26 27
27#include "nouveau_drv.h" 28#include <subdev/timer.h>
28#include "nouveau_vm.h" 29#include <subdev/fb.h>
30#include <subdev/vm.h>
29 31
30void 32struct nv50_vmmgr_priv {
33 struct nouveau_vmmgr base;
34 spinlock_t lock;
35};
36
37static void
31nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, 38nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
32 struct nouveau_gpuobj *pgt[2]) 39 struct nouveau_gpuobj *pgt[2])
33{ 40{
@@ -35,11 +42,11 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
35 u32 coverage = 0; 42 u32 coverage = 0;
36 43
37 if (pgt[0]) { 44 if (pgt[0]) {
38 phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */ 45 phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
39 coverage = (pgt[0]->size >> 3) << 12; 46 coverage = (pgt[0]->size >> 3) << 12;
40 } else 47 } else
41 if (pgt[1]) { 48 if (pgt[1]) {
42 phys = 0x00000001 | pgt[1]->vinst; /* present */ 49 phys = 0x00000001 | pgt[1]->addr; /* present */
43 coverage = (pgt[1]->size >> 3) << 16; 50 coverage = (pgt[1]->size >> 3) << 16;
44 } 51 }
45 52
@@ -69,19 +76,18 @@ vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
69 return phys; 76 return phys;
70} 77}
71 78
72void 79static void
73nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, 80nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
74 struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) 81 struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
75{ 82{
76 struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
77 u32 comp = (mem->memtype & 0x180) >> 7; 83 u32 comp = (mem->memtype & 0x180) >> 7;
78 u32 block, target; 84 u32 block, target;
79 int i; 85 int i;
80 86
81 /* IGPs don't have real VRAM, re-target to stolen system memory */ 87 /* IGPs don't have real VRAM, re-target to stolen system memory */
82 target = 0; 88 target = 0;
83 if (dev_priv->vram_sys_base) { 89 if (nouveau_fb(vma->vm->vmm)->ram.stolen) {
84 phys += dev_priv->vram_sys_base; 90 phys += nouveau_fb(vma->vm->vmm)->ram.stolen;
85 target = 3; 91 target = 3;
86 } 92 }
87 93
@@ -103,7 +109,7 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
103 phys += block << (vma->node->type - 3); 109 phys += block << (vma->node->type - 3);
104 cnt -= block; 110 cnt -= block;
105 if (comp) { 111 if (comp) {
106 u32 tag = mem->tag->start + ((delta >> 16) * comp); 112 u32 tag = mem->tag->offset + ((delta >> 16) * comp);
107 offset_h |= (tag << 17); 113 offset_h |= (tag << 17);
108 delta += block << (vma->node->type - 3); 114 delta += block << (vma->node->type - 3);
109 } 115 }
@@ -117,7 +123,7 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
117 } 123 }
118} 124}
119 125
120void 126static void
121nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, 127nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
122 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) 128 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
123{ 129{
@@ -131,7 +137,7 @@ nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
131 } 137 }
132} 138}
133 139
134void 140static void
135nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) 141nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
136{ 142{
137 pte <<= 3; 143 pte <<= 3;
@@ -142,36 +148,80 @@ nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
142 } 148 }
143} 149}
144 150
145void 151static void
146nv50_vm_flush(struct nouveau_vm *vm) 152nv50_vm_flush(struct nouveau_vm *vm)
147{ 153{
148 struct drm_nouveau_private *dev_priv = vm->dev->dev_private; 154 struct nouveau_engine *engine;
149 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
150 int i; 155 int i;
151 156
152 pinstmem->flush(vm->dev); 157 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
153 158 if (atomic_read(&vm->engref[i])) {
154 /* BAR */ 159 engine = nouveau_engine(vm->vmm, i);
155 if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) { 160 if (engine && engine->tlb_flush)
156 nv50_vm_flush_engine(vm->dev, 6); 161 engine->tlb_flush(engine);
157 return; 162 }
158 }
159
160 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
161 if (atomic_read(&vm->engref[i]))
162 dev_priv->eng[i]->tlb_flush(vm->dev, i);
163 } 163 }
164} 164}
165 165
166void 166void
167nv50_vm_flush_engine(struct drm_device *dev, int engine) 167nv50_vm_flush_engine(struct nouveau_subdev *subdev, int engine)
168{ 168{
169 struct drm_nouveau_private *dev_priv = dev->dev_private; 169 struct nv50_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
170 unsigned long flags; 170 unsigned long flags;
171 171
172 spin_lock_irqsave(&dev_priv->vm_lock, flags); 172 spin_lock_irqsave(&priv->lock, flags);
173 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 173 nv_wr32(subdev, 0x100c80, (engine << 16) | 1);
174 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) 174 if (!nv_wait(subdev, 0x100c80, 0x00000001, 0x00000000))
175 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 175 nv_error(subdev, "vm flush timeout: engine %d\n", engine);
176 spin_unlock_irqrestore(&dev_priv->vm_lock, flags); 176 spin_unlock_irqrestore(&priv->lock, flags);
177}
178
179static int
180nv50_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
181 u64 mm_offset, struct nouveau_vm **pvm)
182{
183 u32 block = (1 << (vmm->pgt_bits + 12));
184 if (block > length)
185 block = length;
186
187 return nouveau_vm_create(vmm, offset, length, mm_offset, block, pvm);
177} 188}
189
190static int
191nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
192 struct nouveau_oclass *oclass, void *data, u32 size,
193 struct nouveau_object **pobject)
194{
195 struct nv50_vmmgr_priv *priv;
196 int ret;
197
198 ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
199 *pobject = nv_object(priv);
200 if (ret)
201 return ret;
202
203 priv->base.limit = 1ULL << 40;
204 priv->base.dma_bits = 40;
205 priv->base.pgt_bits = 29 - 12;
206 priv->base.spg_shift = 12;
207 priv->base.lpg_shift = 16;
208 priv->base.create = nv50_vm_create;
209 priv->base.map_pgt = nv50_vm_map_pgt;
210 priv->base.map = nv50_vm_map;
211 priv->base.map_sg = nv50_vm_map_sg;
212 priv->base.unmap = nv50_vm_unmap;
213 priv->base.flush = nv50_vm_flush;
214 spin_lock_init(&priv->lock);
215 return 0;
216}
217
218struct nouveau_oclass
219nv50_vmmgr_oclass = {
220 .handle = NV_SUBDEV(VM, 0x50),
221 .ofuncs = &(struct nouveau_ofuncs) {
222 .ctor = nv50_vmmgr_ctor,
223 .dtor = _nouveau_vmmgr_dtor,
224 .init = _nouveau_vmmgr_init,
225 .fini = _nouveau_vmmgr_fini,
226 },
227};
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index 30d2bd58828f..30c61e6c2017 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -22,21 +22,28 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/device.h>
26#include <core/gpuobj.h>
26 27
27#include "nouveau_drv.h" 28#include <subdev/timer.h>
28#include "nouveau_vm.h" 29#include <subdev/fb.h>
30#include <subdev/vm.h>
29 31
30void 32struct nvc0_vmmgr_priv {
33 struct nouveau_vmmgr base;
34 spinlock_t lock;
35};
36
37static void
31nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index, 38nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
32 struct nouveau_gpuobj *pgt[2]) 39 struct nouveau_gpuobj *pgt[2])
33{ 40{
34 u32 pde[2] = { 0, 0 }; 41 u32 pde[2] = { 0, 0 };
35 42
36 if (pgt[0]) 43 if (pgt[0])
37 pde[1] = 0x00000001 | (pgt[0]->vinst >> 8); 44 pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
38 if (pgt[1]) 45 if (pgt[1])
39 pde[0] = 0x00000001 | (pgt[1]->vinst >> 8); 46 pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
40 47
41 nv_wo32(pgd, (index * 8) + 0, pde[0]); 48 nv_wo32(pgd, (index * 8) + 0, pde[0]);
42 nv_wo32(pgd, (index * 8) + 4, pde[1]); 49 nv_wo32(pgd, (index * 8) + 4, pde[1]);
@@ -57,7 +64,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
57 return phys; 64 return phys;
58} 65}
59 66
60void 67static void
61nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, 68nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
62 struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) 69 struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
63{ 70{
@@ -73,7 +80,7 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
73 } 80 }
74} 81}
75 82
76void 83static void
77nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, 84nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
78 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) 85 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
79{ 86{
@@ -88,7 +95,7 @@ nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
88 } 95 }
89} 96}
90 97
91void 98static void
92nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) 99nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
93{ 100{
94 pte <<= 3; 101 pte <<= 3;
@@ -100,37 +107,83 @@ nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
100} 107}
101 108
102void 109void
103nvc0_vm_flush(struct nouveau_vm *vm) 110nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
104{ 111{
105 struct drm_nouveau_private *dev_priv = vm->dev->dev_private; 112 struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
106 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
107 struct drm_device *dev = vm->dev;
108 struct nouveau_vm_pgd *vpgd;
109 unsigned long flags; 113 unsigned long flags;
110 u32 engine;
111 114
112 engine = 1; 115 /* looks like maybe a "free flush slots" counter, the
113 if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) 116 * faster you write to 0x100cbc to more it decreases
114 engine |= 4; 117 */
118 spin_lock_irqsave(&priv->lock, flags);
119 if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
120 nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
121 nv_rd32(subdev, 0x100c80), type);
122 }
123
124 nv_wr32(subdev, 0x100cb8, addr >> 8);
125 nv_wr32(subdev, 0x100cbc, 0x80000000 | type);
126
127 /* wait for flush to be queued? */
128 if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) {
129 nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
130 nv_rd32(subdev, 0x100c80), type);
131 }
132 spin_unlock_irqrestore(&priv->lock, flags);
133}
115 134
116 pinstmem->flush(vm->dev); 135static void
136nvc0_vm_flush(struct nouveau_vm *vm)
137{
138 struct nouveau_vm_pgd *vpgd;
117 139
118 spin_lock_irqsave(&dev_priv->vm_lock, flags);
119 list_for_each_entry(vpgd, &vm->pgd_list, head) { 140 list_for_each_entry(vpgd, &vm->pgd_list, head) {
120 /* looks like maybe a "free flush slots" counter, the 141 nvc0_vm_flush_engine(nv_subdev(vm->vmm), vpgd->obj->addr, 1);
121 * faster you write to 0x100cbc to more it decreases
122 */
123 if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
124 NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
125 nv_rd32(dev, 0x100c80), engine);
126 }
127 nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
128 nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
129 /* wait for flush to be queued? */
130 if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
131 NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
132 nv_rd32(dev, 0x100c80), engine);
133 }
134 } 142 }
135 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
136} 143}
144
145static int
146nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
147 u64 mm_offset, struct nouveau_vm **pvm)
148{
149 return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm);
150}
151
152static int
153nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
154 struct nouveau_oclass *oclass, void *data, u32 size,
155 struct nouveau_object **pobject)
156{
157 struct nvc0_vmmgr_priv *priv;
158 int ret;
159
160 ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
161 *pobject = nv_object(priv);
162 if (ret)
163 return ret;
164
165 priv->base.limit = 1ULL << 40;
166 priv->base.dma_bits = 40;
167 priv->base.pgt_bits = 27 - 12;
168 priv->base.spg_shift = 12;
169 priv->base.lpg_shift = 17;
170 priv->base.create = nvc0_vm_create;
171 priv->base.map_pgt = nvc0_vm_map_pgt;
172 priv->base.map = nvc0_vm_map;
173 priv->base.map_sg = nvc0_vm_map_sg;
174 priv->base.unmap = nvc0_vm_unmap;
175 priv->base.flush = nvc0_vm_flush;
176 spin_lock_init(&priv->lock);
177 return 0;
178}
179
180struct nouveau_oclass
181nvc0_vmmgr_oclass = {
182 .handle = NV_SUBDEV(VM, 0xc0),
183 .ofuncs = &(struct nouveau_ofuncs) {
184 .ctor = nvc0_vmmgr_ctor,
185 .dtor = _nouveau_vmmgr_dtor,
186 .init = _nouveau_vmmgr_init,
187 .fini = _nouveau_vmmgr_fini,
188 },
189};
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index ff23d88880e5..cc79c796afee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -21,23 +21,153 @@
21 * 21 *
22 */ 22 */
23 23
24#include "drmP.h" 24#include <core/object.h>
25#include <core/client.h>
26#include <core/device.h>
27#include <core/class.h>
28#include <core/mm.h>
25 29
26#include "nouveau_drv.h" 30#include <subdev/fb.h>
31#include <subdev/timer.h>
32#include <subdev/instmem.h>
33
34#include "nouveau_drm.h"
27#include "nouveau_dma.h" 35#include "nouveau_dma.h"
36#include "nouveau_gem.h"
37#include "nouveau_chan.h"
28#include "nouveau_abi16.h" 38#include "nouveau_abi16.h"
29#include "nouveau_ramht.h" 39
30#include "nouveau_software.h" 40struct nouveau_abi16 *
41nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
42{
43 struct nouveau_cli *cli = nouveau_cli(file_priv);
44 mutex_lock(&cli->mutex);
45 if (!cli->abi16) {
46 struct nouveau_abi16 *abi16;
47 cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
48 if (cli->abi16) {
49 INIT_LIST_HEAD(&abi16->channels);
50 abi16->client = nv_object(cli);
51
52 /* allocate device object targeting client's default
53 * device (ie. the one that belongs to the fd it
54 * opened)
55 */
56 if (nouveau_object_new(abi16->client, NVDRM_CLIENT,
57 NVDRM_DEVICE, 0x0080,
58 &(struct nv_device_class) {
59 .device = ~0ULL,
60 },
61 sizeof(struct nv_device_class),
62 &abi16->device) == 0)
63 return cli->abi16;
64
65 kfree(cli->abi16);
66 cli->abi16 = NULL;
67 }
68
69 mutex_unlock(&cli->mutex);
70 }
71 return cli->abi16;
72}
73
74int
75nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
76{
77 struct nouveau_cli *cli = (void *)abi16->client;
78 mutex_unlock(&cli->mutex);
79 return ret;
80}
81
82u16
83nouveau_abi16_swclass(struct nouveau_drm *drm)
84{
85 switch (nv_device(drm->device)->card_type) {
86 case NV_04:
87 return 0x006e;
88 case NV_10:
89 case NV_20:
90 case NV_30:
91 case NV_40:
92 return 0x016e;
93 case NV_50:
94 return 0x506e;
95 case NV_C0:
96 case NV_D0:
97 case NV_E0:
98 return 0x906e;
99 }
100
101 return 0x0000;
102}
103
104static void
105nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
106 struct nouveau_abi16_ntfy *ntfy)
107{
108 nouveau_mm_free(&chan->heap, &ntfy->node);
109 list_del(&ntfy->head);
110 kfree(ntfy);
111}
112
113static void
114nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
115 struct nouveau_abi16_chan *chan)
116{
117 struct nouveau_abi16_ntfy *ntfy, *temp;
118
119 /* cleanup notifier state */
120 list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
121 nouveau_abi16_ntfy_fini(chan, ntfy);
122 }
123
124 if (chan->ntfy) {
125 nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
126 drm_gem_object_unreference_unlocked(chan->ntfy->gem);
127 }
128
129 if (chan->heap.block_size)
130 nouveau_mm_fini(&chan->heap);
131
132 /* destroy channel object, all children will be killed too */
133 if (chan->chan) {
134 abi16->handles &= ~(1 << (chan->chan->handle & 0xffff));
135 nouveau_channel_del(&chan->chan);
136 }
137
138 list_del(&chan->head);
139 kfree(chan);
140}
141
142void
143nouveau_abi16_fini(struct nouveau_abi16 *abi16)
144{
145 struct nouveau_cli *cli = (void *)abi16->client;
146 struct nouveau_abi16_chan *chan, *temp;
147
148 /* cleanup channels */
149 list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
150 nouveau_abi16_chan_fini(abi16, chan);
151 }
152
153 /* destroy the device object */
154 nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE);
155
156 kfree(cli->abi16);
157 cli->abi16 = NULL;
158}
31 159
32int 160int
33nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) 161nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
34{ 162{
35 struct drm_nouveau_private *dev_priv = dev->dev_private; 163 struct nouveau_drm *drm = nouveau_drm(dev);
164 struct nouveau_device *device = nv_device(drm->device);
165 struct nouveau_timer *ptimer = nouveau_timer(device);
36 struct drm_nouveau_getparam *getparam = data; 166 struct drm_nouveau_getparam *getparam = data;
37 167
38 switch (getparam->param) { 168 switch (getparam->param) {
39 case NOUVEAU_GETPARAM_CHIPSET_ID: 169 case NOUVEAU_GETPARAM_CHIPSET_ID:
40 getparam->value = dev_priv->chipset; 170 getparam->value = device->chipset;
41 break; 171 break;
42 case NOUVEAU_GETPARAM_PCI_VENDOR: 172 case NOUVEAU_GETPARAM_PCI_VENDOR:
43 getparam->value = dev->pci_vendor; 173 getparam->value = dev->pci_vendor;
@@ -55,16 +185,16 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
55 getparam->value = 2; 185 getparam->value = 2;
56 break; 186 break;
57 case NOUVEAU_GETPARAM_FB_SIZE: 187 case NOUVEAU_GETPARAM_FB_SIZE:
58 getparam->value = dev_priv->fb_available_size; 188 getparam->value = drm->gem.vram_available;
59 break; 189 break;
60 case NOUVEAU_GETPARAM_AGP_SIZE: 190 case NOUVEAU_GETPARAM_AGP_SIZE:
61 getparam->value = dev_priv->gart_info.aper_size; 191 getparam->value = drm->gem.gart_available;
62 break; 192 break;
63 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 193 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
64 getparam->value = 0; /* deprecated */ 194 getparam->value = 0; /* deprecated */
65 break; 195 break;
66 case NOUVEAU_GETPARAM_PTIMER_TIME: 196 case NOUVEAU_GETPARAM_PTIMER_TIME:
67 getparam->value = dev_priv->engine.timer.read(dev); 197 getparam->value = ptimer->read(ptimer);
68 break; 198 break;
69 case NOUVEAU_GETPARAM_HAS_BO_USAGE: 199 case NOUVEAU_GETPARAM_HAS_BO_USAGE:
70 getparam->value = 1; 200 getparam->value = 1;
@@ -76,13 +206,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
76 /* NV40 and NV50 versions are quite different, but register 206 /* NV40 and NV50 versions are quite different, but register
77 * address is the same. User is supposed to know the card 207 * address is the same. User is supposed to know the card
78 * family anyway... */ 208 * family anyway... */
79 if (dev_priv->chipset >= 0x40) { 209 if (device->chipset >= 0x40) {
80 getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); 210 getparam->value = nv_rd32(device, 0x001540);
81 break; 211 break;
82 } 212 }
83 /* FALLTHRU */ 213 /* FALLTHRU */
84 default: 214 default:
85 NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); 215 nv_debug(device, "unknown parameter %lld\n", getparam->param);
86 return -EINVAL; 216 return -EINVAL;
87 } 217 }
88 218
@@ -98,148 +228,252 @@ nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
98int 228int
99nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) 229nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
100{ 230{
101 struct drm_nouveau_private *dev_priv = dev->dev_private;
102 struct drm_nouveau_channel_alloc *init = data; 231 struct drm_nouveau_channel_alloc *init = data;
103 struct nouveau_channel *chan; 232 struct nouveau_cli *cli = nouveau_cli(file_priv);
233 struct nouveau_drm *drm = nouveau_drm(dev);
234 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
235 struct nouveau_abi16_chan *chan;
236 struct nouveau_client *client;
237 struct nouveau_device *device;
238 struct nouveau_instmem *imem;
239 struct nouveau_fb *pfb;
104 int ret; 240 int ret;
105 241
106 if (!dev_priv->eng[NVOBJ_ENGINE_GR]) 242 if (unlikely(!abi16))
107 return -ENODEV; 243 return -ENOMEM;
244 client = nv_client(abi16->client);
108 245
109 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) 246 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
110 return -EINVAL; 247 return nouveau_abi16_put(abi16, -EINVAL);
248
249 device = nv_device(abi16->device);
250 imem = nouveau_instmem(device);
251 pfb = nouveau_fb(device);
252
253 /* allocate "abi16 channel" data and make up a handle for it */
254 init->channel = ffsll(~abi16->handles);
255 if (!init->channel--)
256 return nouveau_abi16_put(abi16, -ENOSPC);
257
258 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
259 if (!chan)
260 return nouveau_abi16_put(abi16, -ENOMEM);
261
262 INIT_LIST_HEAD(&chan->notifiers);
263 list_add(&chan->head, &abi16->channels);
264 abi16->handles |= (1 << init->channel);
265
266 /* create channel object and initialise dma and fence management */
267 if (device->card_type >= NV_E0) {
268 init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
269 init->tt_ctxdma_handle = 0;
270 }
111 271
112 ret = nouveau_channel_alloc(dev, &chan, file_priv, 272 ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
113 init->fb_ctxdma_handle, 273 init->channel, init->fb_ctxdma_handle,
114 init->tt_ctxdma_handle); 274 init->tt_ctxdma_handle, &chan->chan);
115 if (ret) 275 if (ret)
116 return ret; 276 goto done;
117 init->channel = chan->id; 277
118 278 if (device->card_type >= NV_50)
119 if (nouveau_vram_pushbuf == 0) { 279 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
120 if (chan->dma.ib_max) 280 NOUVEAU_GEM_DOMAIN_GART;
121 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | 281 else
122 NOUVEAU_GEM_DOMAIN_GART; 282 if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
123 else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
124 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
125 else
126 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
127 } else {
128 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; 283 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
129 } 284 else
285 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
130 286
131 if (dev_priv->card_type < NV_C0) { 287 if (device->card_type < NV_C0) {
132 init->subchan[0].handle = 0x00000000; 288 init->subchan[0].handle = 0x00000000;
133 init->subchan[0].grclass = 0x0000; 289 init->subchan[0].grclass = 0x0000;
134 init->subchan[1].handle = NvSw; 290 init->subchan[1].handle = NvSw;
135 init->subchan[1].grclass = NV_SW; 291 init->subchan[1].grclass = 0x506e;
136 init->nr_subchan = 2; 292 init->nr_subchan = 2;
137 } 293 }
138 294
139 /* Named memory object area */ 295 /* Named memory object area */
140 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, 296 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
297 0, 0, &chan->ntfy);
298 if (ret == 0)
299 ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
300 if (ret)
301 goto done;
302
303 if (device->card_type >= NV_50) {
304 ret = nouveau_bo_vma_add(chan->ntfy, client->vm,
305 &chan->ntfy_vma);
306 if (ret)
307 goto done;
308 }
309
310 ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
141 &init->notifier_handle); 311 &init->notifier_handle);
312 if (ret)
313 goto done;
142 314
143 if (ret == 0) 315 ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
144 atomic_inc(&chan->users); /* userspace reference */ 316done:
145 nouveau_channel_put(&chan); 317 if (ret)
146 return ret; 318 nouveau_abi16_chan_fini(abi16, chan);
319 return nouveau_abi16_put(abi16, ret);
147} 320}
148 321
322
149int 323int
150nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) 324nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
151{ 325{
152 struct drm_nouveau_channel_free *req = data; 326 struct drm_nouveau_channel_free *req = data;
153 struct nouveau_channel *chan; 327 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
328 struct nouveau_abi16_chan *chan;
329 int ret = -ENOENT;
154 330
155 chan = nouveau_channel_get(file_priv, req->channel); 331 if (unlikely(!abi16))
156 if (IS_ERR(chan)) 332 return -ENOMEM;
157 return PTR_ERR(chan);
158 333
159 list_del(&chan->list); 334 list_for_each_entry(chan, &abi16->channels, head) {
160 atomic_dec(&chan->users); 335 if (chan->chan->handle == (NVDRM_CHAN | req->channel)) {
161 nouveau_channel_put(&chan); 336 nouveau_abi16_chan_fini(abi16, chan);
162 return 0; 337 return nouveau_abi16_put(abi16, 0);
338 }
339 }
340
341 return nouveau_abi16_put(abi16, ret);
163} 342}
164 343
165int 344int
166nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) 345nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
167{ 346{
168 struct drm_nouveau_grobj_alloc *init = data; 347 struct drm_nouveau_grobj_alloc *init = data;
169 struct nouveau_channel *chan; 348 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
349 struct nouveau_drm *drm = nouveau_drm(dev);
350 struct nouveau_object *object;
170 int ret; 351 int ret;
171 352
353 if (unlikely(!abi16))
354 return -ENOMEM;
355
172 if (init->handle == ~0) 356 if (init->handle == ~0)
173 return -EINVAL; 357 return nouveau_abi16_put(abi16, -EINVAL);
174 358
175 /* compatibility with userspace that assumes 506e for all chipsets */ 359 /* compatibility with userspace that assumes 506e for all chipsets */
176 if (init->class == 0x506e) { 360 if (init->class == 0x506e) {
177 init->class = nouveau_software_class(dev); 361 init->class = nouveau_abi16_swclass(drm);
178 if (init->class == 0x906e) 362 if (init->class == 0x906e)
179 return 0; 363 return nouveau_abi16_put(abi16, 0);
180 } else
181 if (init->class == 0x906e) {
182 NV_ERROR(dev, "906e not supported yet\n");
183 return -EINVAL;
184 }
185
186 chan = nouveau_channel_get(file_priv, init->channel);
187 if (IS_ERR(chan))
188 return PTR_ERR(chan);
189
190 if (nouveau_ramht_find(chan, init->handle)) {
191 ret = -EEXIST;
192 goto out;
193 }
194
195 ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
196 if (ret) {
197 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
198 ret, init->channel, init->handle);
199 } 364 }
200 365
201out: 366 ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel,
202 nouveau_channel_put(&chan); 367 init->handle, init->class, NULL, 0, &object);
203 return ret; 368 return nouveau_abi16_put(abi16, ret);
204} 369}
205 370
206int 371int
207nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) 372nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
208{ 373{
209 struct drm_nouveau_private *dev_priv = dev->dev_private; 374 struct drm_nouveau_notifierobj_alloc *info = data;
210 struct drm_nouveau_notifierobj_alloc *na = data; 375 struct nouveau_drm *drm = nouveau_drm(dev);
211 struct nouveau_channel *chan; 376 struct nouveau_device *device = nv_device(drm->device);
377 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
378 struct nouveau_abi16_chan *chan, *temp;
379 struct nouveau_abi16_ntfy *ntfy;
380 struct nouveau_object *object;
381 struct nv_dma_class args;
212 int ret; 382 int ret;
213 383
384 if (unlikely(!abi16))
385 return -ENOMEM;
386
214 /* completely unnecessary for these chipsets... */ 387 /* completely unnecessary for these chipsets... */
215 if (unlikely(dev_priv->card_type >= NV_C0)) 388 if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
216 return -EINVAL; 389 return nouveau_abi16_put(abi16, -EINVAL);
217 390
218 chan = nouveau_channel_get(file_priv, na->channel); 391 list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
219 if (IS_ERR(chan)) 392 if (chan->chan->handle == (NVDRM_CHAN | info->channel))
220 return PTR_ERR(chan); 393 break;
394 chan = NULL;
395 }
221 396
222 ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, 397 if (!chan)
223 &na->offset); 398 return nouveau_abi16_put(abi16, -ENOENT);
224 nouveau_channel_put(&chan); 399
225 return ret; 400 ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
401 if (!ntfy)
402 return nouveau_abi16_put(abi16, -ENOMEM);
403
404 list_add(&ntfy->head, &chan->notifiers);
405 ntfy->handle = info->handle;
406
407 ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1,
408 &ntfy->node);
409 if (ret)
410 goto done;
411
412 args.start = ntfy->node->offset;
413 args.limit = ntfy->node->offset + ntfy->node->length - 1;
414 if (device->card_type >= NV_50) {
415 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
416 args.start += chan->ntfy_vma.offset;
417 args.limit += chan->ntfy_vma.offset;
418 } else
419 if (drm->agp.stat == ENABLED) {
420 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
421 args.start += drm->agp.base + chan->ntfy->bo.offset;
422 args.limit += drm->agp.base + chan->ntfy->bo.offset;
423 } else {
424 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
425 args.start += chan->ntfy->bo.offset;
426 args.limit += chan->ntfy->bo.offset;
427 }
428
429 ret = nouveau_object_new(abi16->client, chan->chan->handle,
430 ntfy->handle, 0x003d, &args,
431 sizeof(args), &object);
432 if (ret)
433 goto done;
434
435done:
436 if (ret)
437 nouveau_abi16_ntfy_fini(chan, ntfy);
438 return nouveau_abi16_put(abi16, ret);
226} 439}
227 440
228int 441int
229nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) 442nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
230{ 443{
231 struct drm_nouveau_gpuobj_free *objfree = data; 444 struct drm_nouveau_gpuobj_free *fini = data;
232 struct nouveau_channel *chan; 445 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
446 struct nouveau_abi16_chan *chan, *temp;
447 struct nouveau_abi16_ntfy *ntfy;
233 int ret; 448 int ret;
234 449
235 chan = nouveau_channel_get(file_priv, objfree->channel); 450 if (unlikely(!abi16))
236 if (IS_ERR(chan)) 451 return -ENOMEM;
237 return PTR_ERR(chan); 452
453 list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
454 if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
455 break;
456 chan = NULL;
457 }
458
459 if (!chan)
460 return nouveau_abi16_put(abi16, -ENOENT);
238 461
239 /* Synchronize with the user channel */ 462 /* synchronize with the user channel and destroy the gpu object */
240 nouveau_channel_idle(chan); 463 nouveau_channel_idle(chan->chan);
241 464
242 ret = nouveau_ramht_remove(chan, objfree->handle); 465 ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle);
243 nouveau_channel_put(&chan); 466 if (ret)
244 return ret; 467 return nouveau_abi16_put(abi16, ret);
468
469 /* cleanup extra state if this object was a notifier */
470 list_for_each_entry(ntfy, &chan->notifiers, head) {
471 if (ntfy->handle == fini->handle) {
472 nouveau_mm_free(&chan->heap, &ntfy->node);
473 list_del(&ntfy->head);
474 break;
475 }
476 }
477
478 return nouveau_abi16_put(abi16, 0);
245} 479}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index e6328b008a8c..90004081a501 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -3,6 +3,7 @@
3 3
4#define ABI16_IOCTL_ARGS \ 4#define ABI16_IOCTL_ARGS \
5 struct drm_device *dev, void *data, struct drm_file *file_priv 5 struct drm_device *dev, void *data, struct drm_file *file_priv
6
6int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS); 7int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
7int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS); 8int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
8int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS); 9int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
@@ -11,6 +12,37 @@ int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
11int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS); 12int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
12int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS); 13int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
13 14
15struct nouveau_abi16_ntfy {
16 struct list_head head;
17 struct nouveau_mm_node *node;
18 u32 handle;
19};
20
21struct nouveau_abi16_chan {
22 struct list_head head;
23 struct nouveau_channel *chan;
24 struct list_head notifiers;
25 struct nouveau_bo *ntfy;
26 struct nouveau_vma ntfy_vma;
27 struct nouveau_mm heap;
28};
29
30struct nouveau_abi16 {
31 struct nouveau_object *client;
32 struct nouveau_object *device;
33 struct list_head channels;
34 u64 handles;
35};
36
37struct nouveau_drm;
38struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
39int nouveau_abi16_put(struct nouveau_abi16 *, int);
40void nouveau_abi16_fini(struct nouveau_abi16 *);
41u16 nouveau_abi16_swclass(struct nouveau_drm *);
42
43#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
44#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
45
14struct drm_nouveau_channel_alloc { 46struct drm_nouveau_channel_alloc {
15 uint32_t fb_ctxdma_handle; 47 uint32_t fb_ctxdma_handle;
16 uint32_t tt_ctxdma_handle; 48 uint32_t tt_ctxdma_handle;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 26ebffebe710..e7369c8239d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -7,17 +7,13 @@
7#include <acpi/acpi.h> 7#include <acpi/acpi.h>
8#include <linux/mxm-wmi.h> 8#include <linux/mxm-wmi.h>
9 9
10#include "drmP.h"
11#include "drm.h"
12#include "drm_sarea.h"
13#include "drm_crtc_helper.h"
14#include "nouveau_drv.h"
15#include "nouveau_drm.h"
16#include "nv50_display.h"
17#include "nouveau_connector.h"
18
19#include <linux/vga_switcheroo.h> 10#include <linux/vga_switcheroo.h>
20 11
12#include "drm_edid.h"
13
14#include "nouveau_drm.h"
15#include "nouveau_acpi.h"
16
21#define NOUVEAU_DSM_LED 0x02 17#define NOUVEAU_DSM_LED 0x02
22#define NOUVEAU_DSM_LED_STATE 0x00 18#define NOUVEAU_DSM_LED_STATE 0x00
23#define NOUVEAU_DSM_LED_OFF 0x10 19#define NOUVEAU_DSM_LED_OFF 0x10
@@ -390,10 +386,9 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
390 return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); 386 return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
391} 387}
392 388
393int 389void *
394nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) 390nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
395{ 391{
396 struct nouveau_connector *nv_connector = nouveau_connector(connector);
397 struct acpi_device *acpidev; 392 struct acpi_device *acpidev;
398 acpi_handle handle; 393 acpi_handle handle;
399 int type, ret; 394 int type, ret;
@@ -405,21 +400,20 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
405 type = ACPI_VIDEO_DISPLAY_LCD; 400 type = ACPI_VIDEO_DISPLAY_LCD;
406 break; 401 break;
407 default: 402 default:
408 return -EINVAL; 403 return NULL;
409 } 404 }
410 405
411 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); 406 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
412 if (!handle) 407 if (!handle)
413 return -ENODEV; 408 return NULL;
414 409
415 ret = acpi_bus_get_device(handle, &acpidev); 410 ret = acpi_bus_get_device(handle, &acpidev);
416 if (ret) 411 if (ret)
417 return -ENODEV; 412 return NULL;
418 413
419 ret = acpi_video_get_edid(acpidev, type, -1, &edid); 414 ret = acpi_video_get_edid(acpidev, type, -1, &edid);
420 if (ret < 0) 415 if (ret < 0)
421 return ret; 416 return NULL;
422 417
423 nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); 418 return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
424 return 0;
425} 419}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
new file mode 100644
index 000000000000..08af67722b57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -0,0 +1,22 @@
1#ifndef __NOUVEAU_ACPI_H__
2#define __NOUVEAU_ACPI_H__
3
4#define ROM_BIOS_PAGE 4096
5
6#if defined(CONFIG_ACPI)
7void nouveau_register_dsm_handler(void);
8void nouveau_unregister_dsm_handler(void);
9void nouveau_switcheroo_optimus_dsm(void);
10int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
11bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
12void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
13#else
14static inline void nouveau_register_dsm_handler(void) {}
15static inline void nouveau_unregister_dsm_handler(void) {}
16static inline void nouveau_switcheroo_optimus_dsm(void) {}
17static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
18static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
19static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
20#endif
21
22#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
new file mode 100644
index 000000000000..d28430cd2ba6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -0,0 +1,152 @@
1#include <linux/module.h>
2
3#include <core/device.h>
4
5#include "nouveau_drm.h"
6#include "nouveau_agp.h"
7#include "nouveau_reg.h"
8
9#if __OS_HAS_AGP
10MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
11static int nouveau_agpmode = -1;
12module_param_named(agpmode, nouveau_agpmode, int, 0400);
13
14static unsigned long
15get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
16{
17 struct nouveau_device *device = nv_device(drm->device);
18
19 /*
20 * FW seems to be broken on nv18, it makes the card lock up
21 * randomly.
22 */
23 if (device->chipset == 0x18)
24 mode &= ~PCI_AGP_COMMAND_FW;
25
26 /*
27 * AGP mode set in the command line.
28 */
29 if (nouveau_agpmode > 0) {
30 bool agpv3 = mode & 0x8;
31 int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
32
33 mode = (mode & ~0x7) | (rate & 0x7);
34 }
35
36 return mode;
37}
38
39static bool
40nouveau_agp_enabled(struct nouveau_drm *drm)
41{
42 struct drm_device *dev = drm->dev;
43
44 if (!drm_pci_device_is_agp(dev) || !dev->agp)
45 return false;
46
47 if (drm->agp.stat == UNKNOWN) {
48 if (!nouveau_agpmode)
49 return false;
50 return true;
51 }
52
53 return (drm->agp.stat == ENABLED);
54}
55#endif
56
57void
58nouveau_agp_reset(struct nouveau_drm *drm)
59{
60#if __OS_HAS_AGP
61 struct nouveau_device *device = nv_device(drm->device);
62 struct drm_device *dev = drm->dev;
63 u32 save[2];
64 int ret;
65
66 if (!nouveau_agp_enabled(drm))
67 return;
68
69 /* First of all, disable fast writes, otherwise if it's
70 * already enabled in the AGP bridge and we disable the card's
71 * AGP controller we might be locking ourselves out of it. */
72 if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) |
73 dev->agp->mode) & PCI_AGP_COMMAND_FW) {
74 struct drm_agp_info info;
75 struct drm_agp_mode mode;
76
77 ret = drm_agp_info(dev, &info);
78 if (ret)
79 return;
80
81 mode.mode = get_agp_mode(drm, info.mode);
82 mode.mode &= ~PCI_AGP_COMMAND_FW;
83
84 ret = drm_agp_enable(dev, mode);
85 if (ret)
86 return;
87 }
88
89
90 /* clear busmaster bit, and disable AGP */
91 save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
92 nv_wr32(device, NV04_PBUS_PCI_NV_19, 0);
93
94 /* reset PGRAPH, PFIFO and PTIMER */
95 save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000);
96 nv_mask(device, 0x000200, 0x00011100, save[1]);
97
98 /* and restore bustmaster bit (gives effect of resetting AGP) */
99 nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
100#endif
101}
102
103void
104nouveau_agp_init(struct nouveau_drm *drm)
105{
106#if __OS_HAS_AGP
107 struct nouveau_device *device = nv_device(drm->device);
108 struct drm_device *dev = drm->dev;
109 struct drm_agp_info info;
110 struct drm_agp_mode mode;
111 int ret;
112
113 if (!nouveau_agp_enabled(drm))
114 return;
115 drm->agp.stat = DISABLE;
116
117 ret = drm_agp_acquire(dev);
118 if (ret) {
119 nv_error(device, "unable to acquire AGP: %d\n", ret);
120 return;
121 }
122
123 ret = drm_agp_info(dev, &info);
124 if (ret) {
125 nv_error(device, "unable to get AGP info: %d\n", ret);
126 return;
127 }
128
129 /* see agp.h for the AGPSTAT_* modes available */
130 mode.mode = get_agp_mode(drm, info.mode);
131
132 ret = drm_agp_enable(dev, mode);
133 if (ret) {
134 nv_error(device, "unable to enable AGP: %d\n", ret);
135 return;
136 }
137
138 drm->agp.stat = ENABLED;
139 drm->agp.base = info.aperture_base;
140 drm->agp.size = info.aperture_size;
141#endif
142}
143
144void
145nouveau_agp_fini(struct nouveau_drm *drm)
146{
147#if __OS_HAS_AGP
148 struct drm_device *dev = drm->dev;
149 if (dev->agp && dev->agp->acquired)
150 drm_agp_release(dev);
151#endif
152}
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.h b/drivers/gpu/drm/nouveau/nouveau_agp.h
new file mode 100644
index 000000000000..b55c08652963
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.h
@@ -0,0 +1,10 @@
1#ifndef __NOUVEAU_AGP_H__
2#define __NOUVEAU_AGP_H__
3
4struct nouveau_drm;
5
6void nouveau_agp_reset(struct nouveau_drm *);
7void nouveau_agp_init(struct nouveau_drm *);
8void nouveau_agp_fini(struct nouveau_drm *);
9
10#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index fa22b28e8777..f65b20a375f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -33,8 +33,6 @@
33#include <linux/backlight.h> 33#include <linux/backlight.h>
34#include <linux/acpi.h> 34#include <linux/acpi.h>
35 35
36#include "drmP.h"
37#include "nouveau_drv.h"
38#include "nouveau_drm.h" 36#include "nouveau_drm.h"
39#include "nouveau_reg.h" 37#include "nouveau_reg.h"
40#include "nouveau_encoder.h" 38#include "nouveau_encoder.h"
@@ -42,9 +40,10 @@
42static int 40static int
43nv40_get_intensity(struct backlight_device *bd) 41nv40_get_intensity(struct backlight_device *bd)
44{ 42{
45 struct drm_device *dev = bl_get_data(bd); 43 struct nouveau_drm *drm = bl_get_data(bd);
46 int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK) 44 struct nouveau_device *device = nv_device(drm->device);
47 >> 16; 45 int val = (nv_rd32(device, NV40_PMC_BACKLIGHT) &
46 NV40_PMC_BACKLIGHT_MASK) >> 16;
48 47
49 return val; 48 return val;
50} 49}
@@ -52,11 +51,12 @@ nv40_get_intensity(struct backlight_device *bd)
52static int 51static int
53nv40_set_intensity(struct backlight_device *bd) 52nv40_set_intensity(struct backlight_device *bd)
54{ 53{
55 struct drm_device *dev = bl_get_data(bd); 54 struct nouveau_drm *drm = bl_get_data(bd);
55 struct nouveau_device *device = nv_device(drm->device);
56 int val = bd->props.brightness; 56 int val = bd->props.brightness;
57 int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT); 57 int reg = nv_rd32(device, NV40_PMC_BACKLIGHT);
58 58
59 nv_wr32(dev, NV40_PMC_BACKLIGHT, 59 nv_wr32(device, NV40_PMC_BACKLIGHT,
60 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK)); 60 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
61 61
62 return 0; 62 return 0;
@@ -71,23 +71,20 @@ static const struct backlight_ops nv40_bl_ops = {
71static int 71static int
72nv40_backlight_init(struct drm_connector *connector) 72nv40_backlight_init(struct drm_connector *connector)
73{ 73{
74 struct drm_device *dev = connector->dev; 74 struct nouveau_drm *drm = nouveau_drm(connector->dev);
75 struct drm_nouveau_private *dev_priv = dev->dev_private; 75 struct nouveau_device *device = nv_device(drm->device);
76 struct backlight_properties props; 76 struct backlight_properties props;
77 struct backlight_device *bd; 77 struct backlight_device *bd;
78 78
79 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) 79 if (!(nv_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
80 return 0; 80 return 0;
81 81
82 memset(&props, 0, sizeof(struct backlight_properties)); 82 memset(&props, 0, sizeof(struct backlight_properties));
83 props.type = BACKLIGHT_RAW; 83 props.type = BACKLIGHT_RAW;
84 props.max_brightness = 31; 84 props.max_brightness = 31;
85 bd = backlight_device_register("nv_backlight", &connector->kdev, dev, 85 bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
86 &nv40_bl_ops, &props); 86 &nv40_bl_ops, &props);
87 if (IS_ERR(bd)) 87 drm->backlight = bd;
88 return PTR_ERR(bd);
89
90 dev_priv->backlight = bd;
91 bd->props.brightness = nv40_get_intensity(bd); 88 bd->props.brightness = nv40_get_intensity(bd);
92 backlight_update_status(bd); 89 backlight_update_status(bd);
93 90
@@ -98,12 +95,13 @@ static int
98nv50_get_intensity(struct backlight_device *bd) 95nv50_get_intensity(struct backlight_device *bd)
99{ 96{
100 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 97 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
101 struct drm_device *dev = nv_encoder->base.base.dev; 98 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
99 struct nouveau_device *device = nv_device(drm->device);
102 int or = nv_encoder->or; 100 int or = nv_encoder->or;
103 u32 div = 1025; 101 u32 div = 1025;
104 u32 val; 102 u32 val;
105 103
106 val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or)); 104 val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
107 val &= NV50_PDISP_SOR_PWM_CTL_VAL; 105 val &= NV50_PDISP_SOR_PWM_CTL_VAL;
108 return ((val * 100) + (div / 2)) / div; 106 return ((val * 100) + (div / 2)) / div;
109} 107}
@@ -112,13 +110,14 @@ static int
112nv50_set_intensity(struct backlight_device *bd) 110nv50_set_intensity(struct backlight_device *bd)
113{ 111{
114 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 112 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
115 struct drm_device *dev = nv_encoder->base.base.dev; 113 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
114 struct nouveau_device *device = nv_device(drm->device);
116 int or = nv_encoder->or; 115 int or = nv_encoder->or;
117 u32 div = 1025; 116 u32 div = 1025;
118 u32 val = (bd->props.brightness * div) / 100; 117 u32 val = (bd->props.brightness * div) / 100;
119 118
120 nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), 119 nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
121 NV50_PDISP_SOR_PWM_CTL_NEW | val); 120 NV50_PDISP_SOR_PWM_CTL_NEW | val);
122 return 0; 121 return 0;
123} 122}
124 123
@@ -132,12 +131,13 @@ static int
132nva3_get_intensity(struct backlight_device *bd) 131nva3_get_intensity(struct backlight_device *bd)
133{ 132{
134 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 133 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
135 struct drm_device *dev = nv_encoder->base.base.dev; 134 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
135 struct nouveau_device *device = nv_device(drm->device);
136 int or = nv_encoder->or; 136 int or = nv_encoder->or;
137 u32 div, val; 137 u32 div, val;
138 138
139 div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or)); 139 div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
140 val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or)); 140 val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
141 val &= NVA3_PDISP_SOR_PWM_CTL_VAL; 141 val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
142 if (div && div >= val) 142 if (div && div >= val)
143 return ((val * 100) + (div / 2)) / div; 143 return ((val * 100) + (div / 2)) / div;
@@ -149,16 +149,17 @@ static int
149nva3_set_intensity(struct backlight_device *bd) 149nva3_set_intensity(struct backlight_device *bd)
150{ 150{
151 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 151 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
152 struct drm_device *dev = nv_encoder->base.base.dev; 152 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
153 struct nouveau_device *device = nv_device(drm->device);
153 int or = nv_encoder->or; 154 int or = nv_encoder->or;
154 u32 div, val; 155 u32 div, val;
155 156
156 div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or)); 157 div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
157 val = (bd->props.brightness * div) / 100; 158 val = (bd->props.brightness * div) / 100;
158 if (div) { 159 if (div) {
159 nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), val | 160 nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val |
160 NV50_PDISP_SOR_PWM_CTL_NEW | 161 NV50_PDISP_SOR_PWM_CTL_NEW |
161 NVA3_PDISP_SOR_PWM_CTL_UNK); 162 NVA3_PDISP_SOR_PWM_CTL_UNK);
162 return 0; 163 return 0;
163 } 164 }
164 165
@@ -174,26 +175,26 @@ static const struct backlight_ops nva3_bl_ops = {
174static int 175static int
175nv50_backlight_init(struct drm_connector *connector) 176nv50_backlight_init(struct drm_connector *connector)
176{ 177{
177 struct drm_device *dev = connector->dev; 178 struct nouveau_drm *drm = nouveau_drm(connector->dev);
178 struct drm_nouveau_private *dev_priv = dev->dev_private; 179 struct nouveau_device *device = nv_device(drm->device);
179 struct nouveau_encoder *nv_encoder; 180 struct nouveau_encoder *nv_encoder;
180 struct backlight_properties props; 181 struct backlight_properties props;
181 struct backlight_device *bd; 182 struct backlight_device *bd;
182 const struct backlight_ops *ops; 183 const struct backlight_ops *ops;
183 184
184 nv_encoder = find_encoder(connector, OUTPUT_LVDS); 185 nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
185 if (!nv_encoder) { 186 if (!nv_encoder) {
186 nv_encoder = find_encoder(connector, OUTPUT_DP); 187 nv_encoder = find_encoder(connector, DCB_OUTPUT_DP);
187 if (!nv_encoder) 188 if (!nv_encoder)
188 return -ENODEV; 189 return -ENODEV;
189 } 190 }
190 191
191 if (!nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) 192 if (!nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
192 return 0; 193 return 0;
193 194
194 if (dev_priv->chipset <= 0xa0 || 195 if (device->chipset <= 0xa0 ||
195 dev_priv->chipset == 0xaa || 196 device->chipset == 0xaa ||
196 dev_priv->chipset == 0xac) 197 device->chipset == 0xac)
197 ops = &nv50_bl_ops; 198 ops = &nv50_bl_ops;
198 else 199 else
199 ops = &nva3_bl_ops; 200 ops = &nva3_bl_ops;
@@ -206,7 +207,7 @@ nv50_backlight_init(struct drm_connector *connector)
206 if (IS_ERR(bd)) 207 if (IS_ERR(bd))
207 return PTR_ERR(bd); 208 return PTR_ERR(bd);
208 209
209 dev_priv->backlight = bd; 210 drm->backlight = bd;
210 bd->props.brightness = bd->ops->get_brightness(bd); 211 bd->props.brightness = bd->ops->get_brightness(bd);
211 backlight_update_status(bd); 212 backlight_update_status(bd);
212 return 0; 213 return 0;
@@ -215,12 +216,13 @@ nv50_backlight_init(struct drm_connector *connector)
215int 216int
216nouveau_backlight_init(struct drm_device *dev) 217nouveau_backlight_init(struct drm_device *dev)
217{ 218{
218 struct drm_nouveau_private *dev_priv = dev->dev_private; 219 struct nouveau_drm *drm = nouveau_drm(dev);
220 struct nouveau_device *device = nv_device(drm->device);
219 struct drm_connector *connector; 221 struct drm_connector *connector;
220 222
221#ifdef CONFIG_ACPI 223#ifdef CONFIG_ACPI
222 if (acpi_video_backlight_support()) { 224 if (acpi_video_backlight_support()) {
223 NV_INFO(dev, "ACPI backlight interface available, " 225 NV_INFO(drm, "ACPI backlight interface available, "
224 "not registering our own\n"); 226 "not registering our own\n");
225 return 0; 227 return 0;
226 } 228 }
@@ -231,7 +233,7 @@ nouveau_backlight_init(struct drm_device *dev)
231 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 233 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
232 continue; 234 continue;
233 235
234 switch (dev_priv->card_type) { 236 switch (device->card_type) {
235 case NV_40: 237 case NV_40:
236 return nv40_backlight_init(connector); 238 return nv40_backlight_init(connector);
237 case NV_50: 239 case NV_50:
@@ -248,10 +250,10 @@ nouveau_backlight_init(struct drm_device *dev)
248void 250void
249nouveau_backlight_exit(struct drm_device *dev) 251nouveau_backlight_exit(struct drm_device *dev)
250{ 252{
251 struct drm_nouveau_private *dev_priv = dev->dev_private; 253 struct nouveau_drm *drm = nouveau_drm(dev);
252 254
253 if (dev_priv->backlight) { 255 if (drm->backlight) {
254 backlight_device_unregister(dev_priv->backlight); 256 backlight_device_unregister(drm->backlight);
255 dev_priv->backlight = NULL; 257 drm->backlight = NULL;
256 } 258 }
257} 259}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index a0a3fe3c016b..f6b7fa39d312 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -22,12 +22,13 @@
22 * SOFTWARE. 22 * SOFTWARE.
23 */ 23 */
24 24
25#include <subdev/bios.h>
26
25#include "drmP.h" 27#include "drmP.h"
26#define NV_DEBUG_NOTRACE 28#include "nouveau_drm.h"
27#include "nouveau_drv.h" 29#include "nouveau_reg.h"
28#include "nouveau_hw.h" 30#include "nouveau_hw.h"
29#include "nouveau_encoder.h" 31#include "nouveau_encoder.h"
30#include "nouveau_gpio.h"
31 32
32#include <linux/io-mapping.h> 33#include <linux/io-mapping.h>
33#include <linux/firmware.h> 34#include <linux/firmware.h>
@@ -65,3677 +66,6 @@ static bool nv_cksum(const uint8_t *data, unsigned int length)
65 return false; 66 return false;
66} 67}
67 68
68static int
69score_vbios(struct nvbios *bios, const bool writeable)
70{
71 if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
72 NV_TRACEWARN(bios->dev, "... BIOS signature not found\n");
73 return 0;
74 }
75
76 if (nv_cksum(bios->data, bios->data[2] * 512)) {
77 NV_TRACEWARN(bios->dev, "... BIOS checksum invalid\n");
78 /* if a ro image is somewhat bad, it's probably all rubbish */
79 return writeable ? 2 : 1;
80 }
81
82 NV_TRACE(bios->dev, "... appears to be valid\n");
83 return 3;
84}
85
86static void
87bios_shadow_prom(struct nvbios *bios)
88{
89 struct drm_device *dev = bios->dev;
90 struct drm_nouveau_private *dev_priv = dev->dev_private;
91 u32 pcireg, access;
92 u16 pcir;
93 int i;
94
95 /* enable access to rom */
96 if (dev_priv->card_type >= NV_50)
97 pcireg = 0x088050;
98 else
99 pcireg = NV_PBUS_PCI_NV_20;
100 access = nv_mask(dev, pcireg, 0x00000001, 0x00000000);
101
102 /* bail if no rom signature, with a workaround for a PROM reading
103 * issue on some chipsets. the first read after a period of
104 * inactivity returns the wrong result, so retry the first header
105 * byte a few times before giving up as a workaround
106 */
107 i = 16;
108 do {
109 if (nv_rd08(dev, NV_PROM_OFFSET + 0) == 0x55)
110 break;
111 } while (i--);
112
113 if (!i || nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
114 goto out;
115
116 /* additional check (see note below) - read PCI record header */
117 pcir = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
118 nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
119 if (nv_rd08(dev, NV_PROM_OFFSET + pcir + 0) != 'P' ||
120 nv_rd08(dev, NV_PROM_OFFSET + pcir + 1) != 'C' ||
121 nv_rd08(dev, NV_PROM_OFFSET + pcir + 2) != 'I' ||
122 nv_rd08(dev, NV_PROM_OFFSET + pcir + 3) != 'R')
123 goto out;
124
125 /* read entire bios image to system memory */
126 bios->length = nv_rd08(dev, NV_PROM_OFFSET + 2) * 512;
127 bios->data = kmalloc(bios->length, GFP_KERNEL);
128 if (bios->data) {
129 for (i = 0; i < bios->length; i++)
130 bios->data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
131 }
132
133out:
134 /* disable access to rom */
135 nv_wr32(dev, pcireg, access);
136}
137
138static void
139bios_shadow_pramin(struct nvbios *bios)
140{
141 struct drm_device *dev = bios->dev;
142 struct drm_nouveau_private *dev_priv = dev->dev_private;
143 u32 bar0 = 0;
144 int i;
145
146 if (dev_priv->card_type >= NV_50) {
147 u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8;
148 if (!addr) {
149 addr = (u64)nv_rd32(dev, 0x001700) << 16;
150 addr += 0xf0000;
151 }
152
153 bar0 = nv_mask(dev, 0x001700, 0xffffffff, addr >> 16);
154 }
155
156 /* bail if no rom signature */
157 if (nv_rd08(dev, NV_PRAMIN_OFFSET + 0) != 0x55 ||
158 nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
159 goto out;
160
161 bios->length = nv_rd08(dev, NV_PRAMIN_OFFSET + 2) * 512;
162 bios->data = kmalloc(bios->length, GFP_KERNEL);
163 if (bios->data) {
164 for (i = 0; i < bios->length; i++)
165 bios->data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
166 }
167
168out:
169 if (dev_priv->card_type >= NV_50)
170 nv_wr32(dev, 0x001700, bar0);
171}
172
173static void
174bios_shadow_pci(struct nvbios *bios)
175{
176 struct pci_dev *pdev = bios->dev->pdev;
177 size_t length;
178
179 if (!pci_enable_rom(pdev)) {
180 void __iomem *rom = pci_map_rom(pdev, &length);
181 if (rom && length) {
182 bios->data = kmalloc(length, GFP_KERNEL);
183 if (bios->data) {
184 memcpy_fromio(bios->data, rom, length);
185 bios->length = length;
186 }
187 }
188 if (rom)
189 pci_unmap_rom(pdev, rom);
190
191 pci_disable_rom(pdev);
192 }
193}
194
195static void
196bios_shadow_acpi(struct nvbios *bios)
197{
198 struct pci_dev *pdev = bios->dev->pdev;
199 int cnt = 65536 / ROM_BIOS_PAGE;
200 int ret;
201
202 if (!nouveau_acpi_rom_supported(pdev))
203 return;
204
205 bios->data = kmalloc(cnt * ROM_BIOS_PAGE, GFP_KERNEL);
206 if (!bios->data)
207 return;
208
209 bios->length = 0;
210 while (cnt--) {
211 ret = nouveau_acpi_get_bios_chunk(bios->data, bios->length,
212 ROM_BIOS_PAGE);
213 if (ret != ROM_BIOS_PAGE)
214 return;
215
216 bios->length += ROM_BIOS_PAGE;
217 }
218}
219
220struct methods {
221 const char desc[8];
222 void (*shadow)(struct nvbios *);
223 const bool rw;
224 int score;
225 u32 size;
226 u8 *data;
227};
228
229static bool
230bios_shadow(struct drm_device *dev)
231{
232 struct methods shadow_methods[] = {
233 { "PRAMIN", bios_shadow_pramin, true, 0, 0, NULL },
234 { "PROM", bios_shadow_prom, false, 0, 0, NULL },
235 { "ACPI", bios_shadow_acpi, true, 0, 0, NULL },
236 { "PCIROM", bios_shadow_pci, true, 0, 0, NULL },
237 {}
238 };
239 struct drm_nouveau_private *dev_priv = dev->dev_private;
240 struct nvbios *bios = &dev_priv->vbios;
241 struct methods *mthd, *best;
242 const struct firmware *fw;
243 char fname[32];
244 int ret;
245
246 if (nouveau_vbios) {
247 /* try to match one of the built-in methods */
248 mthd = shadow_methods;
249 do {
250 if (strcasecmp(nouveau_vbios, mthd->desc))
251 continue;
252 NV_INFO(dev, "VBIOS source: %s\n", mthd->desc);
253
254 mthd->shadow(bios);
255 mthd->score = score_vbios(bios, mthd->rw);
256 if (mthd->score)
257 return true;
258 } while ((++mthd)->shadow);
259
260 /* attempt to load firmware image */
261 snprintf(fname, sizeof(fname), "nouveau/%s", nouveau_vbios);
262 ret = request_firmware(&fw, fname, &dev->pdev->dev);
263 if (ret == 0) {
264 bios->length = fw->size;
265 bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
266 release_firmware(fw);
267
268 NV_INFO(dev, "VBIOS image: %s\n", nouveau_vbios);
269 if (score_vbios(bios, 1))
270 return true;
271
272 kfree(bios->data);
273 bios->data = NULL;
274 }
275
276 NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
277 }
278
279 mthd = shadow_methods;
280 do {
281 NV_TRACE(dev, "Checking %s for VBIOS\n", mthd->desc);
282 mthd->shadow(bios);
283 mthd->score = score_vbios(bios, mthd->rw);
284 mthd->size = bios->length;
285 mthd->data = bios->data;
286 bios->data = NULL;
287 } while (mthd->score != 3 && (++mthd)->shadow);
288
289 mthd = shadow_methods;
290 best = mthd;
291 do {
292 if (mthd->score > best->score) {
293 kfree(best->data);
294 best = mthd;
295 }
296 } while ((++mthd)->shadow);
297
298 if (best->score) {
299 NV_TRACE(dev, "Using VBIOS from %s\n", best->desc);
300 bios->length = best->size;
301 bios->data = best->data;
302 return true;
303 }
304
305 NV_ERROR(dev, "No valid VBIOS image found\n");
306 return false;
307}
308
309struct init_tbl_entry {
310 char *name;
311 uint8_t id;
312 /* Return:
313 * > 0: success, length of opcode
314 * 0: success, but abort further parsing of table (INIT_DONE etc)
315 * < 0: failure, table parsing will be aborted
316 */
317 int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
318};
319
320static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *);
321
322#define MACRO_INDEX_SIZE 2
323#define MACRO_SIZE 8
324#define CONDITION_SIZE 12
325#define IO_FLAG_CONDITION_SIZE 9
326#define IO_CONDITION_SIZE 5
327#define MEM_INIT_SIZE 66
328
329static void still_alive(void)
330{
331#if 0
332 sync();
333 mdelay(2);
334#endif
335}
336
337static uint32_t
338munge_reg(struct nvbios *bios, uint32_t reg)
339{
340 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
341 struct dcb_entry *dcbent = bios->display.output;
342
343 if (dev_priv->card_type < NV_50)
344 return reg;
345
346 if (reg & 0x80000000) {
347 BUG_ON(bios->display.crtc < 0);
348 reg += bios->display.crtc * 0x800;
349 }
350
351 if (reg & 0x40000000) {
352 BUG_ON(!dcbent);
353
354 reg += (ffs(dcbent->or) - 1) * 0x800;
355 if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1))
356 reg += 0x00000080;
357 }
358
359 reg &= ~0xe0000000;
360 return reg;
361}
362
363static int
364valid_reg(struct nvbios *bios, uint32_t reg)
365{
366 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
367 struct drm_device *dev = bios->dev;
368
369 /* C51 has misaligned regs on purpose. Marvellous */
370 if (reg & 0x2 ||
371 (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
372 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
373
374 /* warn on C51 regs that haven't been verified accessible in tracing */
375 if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
376 reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
377 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
378 reg);
379
380 if (reg >= (8*1024*1024)) {
381 NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
382 return 0;
383 }
384
385 return 1;
386}
387
388static bool
389valid_idx_port(struct nvbios *bios, uint16_t port)
390{
391 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
392 struct drm_device *dev = bios->dev;
393
394 /*
395 * If adding more ports here, the read/write functions below will need
396 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
397 * used for the port in question
398 */
399 if (dev_priv->card_type < NV_50) {
400 if (port == NV_CIO_CRX__COLOR)
401 return true;
402 if (port == NV_VIO_SRX)
403 return true;
404 } else {
405 if (port == NV_CIO_CRX__COLOR)
406 return true;
407 }
408
409 NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n",
410 port);
411
412 return false;
413}
414
415static bool
416valid_port(struct nvbios *bios, uint16_t port)
417{
418 struct drm_device *dev = bios->dev;
419
420 /*
421 * If adding more ports here, the read/write functions below will need
422 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
423 * used for the port in question
424 */
425 if (port == NV_VIO_VSE2)
426 return true;
427
428 NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port);
429
430 return false;
431}
432
433static uint32_t
434bios_rd32(struct nvbios *bios, uint32_t reg)
435{
436 uint32_t data;
437
438 reg = munge_reg(bios, reg);
439 if (!valid_reg(bios, reg))
440 return 0;
441
442 /*
443 * C51 sometimes uses regs with bit0 set in the address. For these
444 * cases there should exist a translation in a BIOS table to an IO
445 * port address which the BIOS uses for accessing the reg
446 *
447 * These only seem to appear for the power control regs to a flat panel,
448 * and the GPIO regs at 0x60081*. In C51 mmio traces the normal regs
449 * for 0x1308 and 0x1310 are used - hence the mask below. An S3
450 * suspend-resume mmio trace from a C51 will be required to see if this
451 * is true for the power microcode in 0x14.., or whether the direct IO
452 * port access method is needed
453 */
454 if (reg & 0x1)
455 reg &= ~0x1;
456
457 data = nv_rd32(bios->dev, reg);
458
459 BIOSLOG(bios, " Read: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
460
461 return data;
462}
463
464static void
465bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
466{
467 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
468
469 reg = munge_reg(bios, reg);
470 if (!valid_reg(bios, reg))
471 return;
472
473 /* see note in bios_rd32 */
474 if (reg & 0x1)
475 reg &= 0xfffffffe;
476
477 LOG_OLD_VALUE(bios_rd32(bios, reg));
478 BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
479
480 if (dev_priv->vbios.execute) {
481 still_alive();
482 nv_wr32(bios->dev, reg, data);
483 }
484}
485
486static uint8_t
487bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index)
488{
489 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
490 struct drm_device *dev = bios->dev;
491 uint8_t data;
492
493 if (!valid_idx_port(bios, port))
494 return 0;
495
496 if (dev_priv->card_type < NV_50) {
497 if (port == NV_VIO_SRX)
498 data = NVReadVgaSeq(dev, bios->state.crtchead, index);
499 else /* assume NV_CIO_CRX__COLOR */
500 data = NVReadVgaCrtc(dev, bios->state.crtchead, index);
501 } else {
502 uint32_t data32;
503
504 data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
505 data = (data32 >> ((index & 3) << 3)) & 0xff;
506 }
507
508 BIOSLOG(bios, " Indexed IO read: Port: 0x%04X, Index: 0x%02X, "
509 "Head: 0x%02X, Data: 0x%02X\n",
510 port, index, bios->state.crtchead, data);
511 return data;
512}
513
514static void
515bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data)
516{
517 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
518 struct drm_device *dev = bios->dev;
519
520 if (!valid_idx_port(bios, port))
521 return;
522
523 /*
524 * The current head is maintained in the nvbios member state.crtchead.
525 * We trap changes to CR44 and update the head variable and hence the
526 * register set written.
527 * As CR44 only exists on CRTC0, we update crtchead to head0 in advance
528 * of the write, and to head1 after the write
529 */
530 if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 &&
531 data != NV_CIO_CRE_44_HEADB)
532 bios->state.crtchead = 0;
533
534 LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index));
535 BIOSLOG(bios, " Indexed IO write: Port: 0x%04X, Index: 0x%02X, "
536 "Head: 0x%02X, Data: 0x%02X\n",
537 port, index, bios->state.crtchead, data);
538
539 if (bios->execute && dev_priv->card_type < NV_50) {
540 still_alive();
541 if (port == NV_VIO_SRX)
542 NVWriteVgaSeq(dev, bios->state.crtchead, index, data);
543 else /* assume NV_CIO_CRX__COLOR */
544 NVWriteVgaCrtc(dev, bios->state.crtchead, index, data);
545 } else
546 if (bios->execute) {
547 uint32_t data32, shift = (index & 3) << 3;
548
549 still_alive();
550
551 data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
552 data32 &= ~(0xff << shift);
553 data32 |= (data << shift);
554 bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32);
555 }
556
557 if (port == NV_CIO_CRX__COLOR &&
558 index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB)
559 bios->state.crtchead = 1;
560}
561
562static uint8_t
563bios_port_rd(struct nvbios *bios, uint16_t port)
564{
565 uint8_t data, head = bios->state.crtchead;
566
567 if (!valid_port(bios, port))
568 return 0;
569
570 data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port);
571
572 BIOSLOG(bios, " IO read: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
573 port, head, data);
574
575 return data;
576}
577
578static void
579bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data)
580{
581 int head = bios->state.crtchead;
582
583 if (!valid_port(bios, port))
584 return;
585
586 LOG_OLD_VALUE(bios_port_rd(bios, port));
587 BIOSLOG(bios, " IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
588 port, head, data);
589
590 if (!bios->execute)
591 return;
592
593 still_alive();
594 NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data);
595}
596
597static bool
598io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
599{
600 /*
601 * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte
602 * for the CRTC index; 1 byte for the mask to apply to the value
603 * retrieved from the CRTC; 1 byte for the shift right to apply to the
604 * masked CRTC value; 2 bytes for the offset to the flag array, to
605 * which the shifted value is added; 1 byte for the mask applied to the
606 * value read from the flag array; and 1 byte for the value to compare
607 * against the masked byte from the flag table.
608 */
609
610 uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE;
611 uint16_t crtcport = ROM16(bios->data[condptr]);
612 uint8_t crtcindex = bios->data[condptr + 2];
613 uint8_t mask = bios->data[condptr + 3];
614 uint8_t shift = bios->data[condptr + 4];
615 uint16_t flagarray = ROM16(bios->data[condptr + 5]);
616 uint8_t flagarraymask = bios->data[condptr + 7];
617 uint8_t cmpval = bios->data[condptr + 8];
618 uint8_t data;
619
620 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
621 "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, "
622 "Cmpval: 0x%02X\n",
623 offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval);
624
625 data = bios_idxprt_rd(bios, crtcport, crtcindex);
626
627 data = bios->data[flagarray + ((data & mask) >> shift)];
628 data &= flagarraymask;
629
630 BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
631 offset, data, cmpval);
632
633 return (data == cmpval);
634}
635
636static bool
637bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
638{
639 /*
640 * The condition table entry has 4 bytes for the address of the
641 * register to check, 4 bytes for a mask to apply to the register and
642 * 4 for a test comparison value
643 */
644
645 uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE;
646 uint32_t reg = ROM32(bios->data[condptr]);
647 uint32_t mask = ROM32(bios->data[condptr + 4]);
648 uint32_t cmpval = ROM32(bios->data[condptr + 8]);
649 uint32_t data;
650
651 BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n",
652 offset, cond, reg, mask);
653
654 data = bios_rd32(bios, reg) & mask;
655
656 BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
657 offset, data, cmpval);
658
659 return (data == cmpval);
660}
661
662static bool
663io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
664{
665 /*
666 * The IO condition entry has 2 bytes for the IO port address; 1 byte
667 * for the index to write to io_port; 1 byte for the mask to apply to
668 * the byte read from io_port+1; and 1 byte for the value to compare
669 * against the masked byte.
670 */
671
672 uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE;
673 uint16_t io_port = ROM16(bios->data[condptr]);
674 uint8_t port_index = bios->data[condptr + 2];
675 uint8_t mask = bios->data[condptr + 3];
676 uint8_t cmpval = bios->data[condptr + 4];
677
678 uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask;
679
680 BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
681 offset, data, cmpval);
682
683 return (data == cmpval);
684}
685
686static int
687nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
688{
689 struct drm_nouveau_private *dev_priv = dev->dev_private;
690 struct nouveau_pll_vals pll;
691 struct pll_lims pll_limits;
692 u32 ctrl, mask, coef;
693 int ret;
694
695 ret = get_pll_limits(dev, reg, &pll_limits);
696 if (ret)
697 return ret;
698
699 clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll);
700 if (!clk)
701 return -ERANGE;
702
703 coef = pll.N1 << 8 | pll.M1;
704 ctrl = pll.log2P << 16;
705 mask = 0x00070000;
706 if (reg == 0x004008) {
707 mask |= 0x01f80000;
708 ctrl |= (pll_limits.log2p_bias << 19);
709 ctrl |= (pll.log2P << 22);
710 }
711
712 if (!dev_priv->vbios.execute)
713 return 0;
714
715 nv_mask(dev, reg + 0, mask, ctrl);
716 nv_wr32(dev, reg + 4, coef);
717 return 0;
718}
719
720static int
721setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
722{
723 struct drm_device *dev = bios->dev;
724 struct drm_nouveau_private *dev_priv = dev->dev_private;
725 /* clk in kHz */
726 struct pll_lims pll_lim;
727 struct nouveau_pll_vals pllvals;
728 int ret;
729
730 if (dev_priv->card_type >= NV_50)
731 return nv50_pll_set(dev, reg, clk);
732
733 /* high regs (such as in the mac g5 table) are not -= 4 */
734 ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim);
735 if (ret)
736 return ret;
737
738 clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals);
739 if (!clk)
740 return -ERANGE;
741
742 if (bios->execute) {
743 still_alive();
744 nouveau_hw_setpll(dev, reg, &pllvals);
745 }
746
747 return 0;
748}
749
750static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
751{
752 struct drm_nouveau_private *dev_priv = dev->dev_private;
753 struct nvbios *bios = &dev_priv->vbios;
754
755 /*
756 * For the results of this function to be correct, CR44 must have been
757 * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0,
758 * and the DCB table parsed, before the script calling the function is
759 * run. run_digital_op_script is example of how to do such setup
760 */
761
762 uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
763
764 if (dcb_entry > bios->dcb.entries) {
765 NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
766 "(%02X)\n", dcb_entry);
767 dcb_entry = 0x7f; /* unused / invalid marker */
768 }
769
770 return dcb_entry;
771}
772
773static struct nouveau_i2c_chan *
774init_i2c_device_find(struct drm_device *dev, int i2c_index)
775{
776 if (i2c_index == 0xff) {
777 struct drm_nouveau_private *dev_priv = dev->dev_private;
778 struct dcb_table *dcb = &dev_priv->vbios.dcb;
779 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
780 int idx = dcb_entry_idx_from_crtchead(dev);
781
782 i2c_index = NV_I2C_DEFAULT(0);
783 if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
784 i2c_index = NV_I2C_DEFAULT(1);
785 }
786
787 return nouveau_i2c_find(dev, i2c_index);
788}
789
790static uint32_t
791get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
792{
793 /*
794 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
795 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
796 * CR58 for CR57 = 0 to index a table of offsets to the basic
797 * 0x6808b0 address.
798 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
799 * CR58 for CR57 = 0 to index a table of offsets to the basic
800 * 0x6808b0 address, and then flip the offset by 8.
801 */
802
803 struct drm_nouveau_private *dev_priv = dev->dev_private;
804 struct nvbios *bios = &dev_priv->vbios;
805 const int pramdac_offset[13] = {
806 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
807 const uint32_t pramdac_table[4] = {
808 0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
809
810 if (mlv >= 0x80) {
811 int dcb_entry, dacoffset;
812
813 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
814 dcb_entry = dcb_entry_idx_from_crtchead(dev);
815 if (dcb_entry == 0x7f)
816 return 0;
817 dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
818 if (mlv == 0x81)
819 dacoffset ^= 8;
820 return 0x6808b0 + dacoffset;
821 } else {
822 if (mlv >= ARRAY_SIZE(pramdac_table)) {
823 NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
824 mlv);
825 return 0;
826 }
827 return pramdac_table[mlv];
828 }
829}
830
831static int
832init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
833 struct init_exec *iexec)
834{
835 /*
836 * INIT_IO_RESTRICT_PROG opcode: 0x32 ('2')
837 *
838 * offset (8 bit): opcode
839 * offset + 1 (16 bit): CRTC port
840 * offset + 3 (8 bit): CRTC index
841 * offset + 4 (8 bit): mask
842 * offset + 5 (8 bit): shift
843 * offset + 6 (8 bit): count
844 * offset + 7 (32 bit): register
845 * offset + 11 (32 bit): configuration 1
846 * ...
847 *
848 * Starting at offset + 11 there are "count" 32 bit values.
849 * To find out which value to use read index "CRTC index" on "CRTC
850 * port", AND this value with "mask" and then bit shift right "shift"
851 * bits. Read the appropriate value using this index and write to
852 * "register"
853 */
854
855 uint16_t crtcport = ROM16(bios->data[offset + 1]);
856 uint8_t crtcindex = bios->data[offset + 3];
857 uint8_t mask = bios->data[offset + 4];
858 uint8_t shift = bios->data[offset + 5];
859 uint8_t count = bios->data[offset + 6];
860 uint32_t reg = ROM32(bios->data[offset + 7]);
861 uint8_t config;
862 uint32_t configval;
863 int len = 11 + count * 4;
864
865 if (!iexec->execute)
866 return len;
867
868 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
869 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
870 offset, crtcport, crtcindex, mask, shift, count, reg);
871
872 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
873 if (config > count) {
874 NV_ERROR(bios->dev,
875 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
876 offset, config, count);
877 return len;
878 }
879
880 configval = ROM32(bios->data[offset + 11 + config * 4]);
881
882 BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config);
883
884 bios_wr32(bios, reg, configval);
885
886 return len;
887}
888
889static int
890init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
891{
892 /*
893 * INIT_REPEAT opcode: 0x33 ('3')
894 *
895 * offset (8 bit): opcode
896 * offset + 1 (8 bit): count
897 *
898 * Execute script following this opcode up to INIT_REPEAT_END
899 * "count" times
900 */
901
902 uint8_t count = bios->data[offset + 1];
903 uint8_t i;
904
905 /* no iexec->execute check by design */
906
907 BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n",
908 offset, count);
909
910 iexec->repeat = true;
911
912 /*
913 * count - 1, as the script block will execute once when we leave this
914 * opcode -- this is compatible with bios behaviour as:
915 * a) the block is always executed at least once, even if count == 0
916 * b) the bios interpreter skips to the op following INIT_END_REPEAT,
917 * while we don't
918 */
919 for (i = 0; i < count - 1; i++)
920 parse_init_table(bios, offset + 2, iexec);
921
922 iexec->repeat = false;
923
924 return 2;
925}
926
static int
init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
		     struct init_exec *iexec)
{
	/*
	 * INIT_IO_RESTRICT_PLL opcode: 0x34 ('4')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (16 bit): CRTC port
	 * offset + 3  (8 bit): CRTC index
	 * offset + 4  (8 bit): mask
	 * offset + 5  (8 bit): shift
	 * offset + 6  (8 bit): IO flag condition index
	 * offset + 7  (8 bit): count
	 * offset + 8  (32 bit): register
	 * offset + 12 (16 bit): frequency 1
	 * ...
	 *
	 * Starting at offset + 12 there are "count" 16 bit frequencies (10kHz).
	 * Set PLL register "register" to coefficients for frequency n,
	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
	 * "mask" and shifted right by "shift".
	 *
	 * If "IO flag condition index" > 0, and condition met, double
	 * frequency before setting it.
	 */

	uint16_t crtcport = ROM16(bios->data[offset + 1]);
	uint8_t crtcindex = bios->data[offset + 3];
	uint8_t mask = bios->data[offset + 4];
	uint8_t shift = bios->data[offset + 5];
	/*
	 * NOTE(review): stored signed, so table indices >= 0x80 read as
	 * negative and fail the "> 0" test below -- presumably intentional
	 * ("no condition" encoding), but confirm against real BIOS images.
	 */
	int8_t io_flag_condition_idx = bios->data[offset + 6];
	uint8_t count = bios->data[offset + 7];
	uint32_t reg = ROM32(bios->data[offset + 8]);
	uint8_t config;
	uint16_t freq;
	/* full opcode length; must be returned even when not executing */
	int len = 12 + count * 2;

	if (!iexec->execute)
		return len;

	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
		      "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
		      "Count: 0x%02X, Reg: 0x%08X\n",
		offset, crtcport, crtcindex, mask, shift,
		io_flag_condition_idx, count, reg);

	/* table index is derived from live hardware state */
	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
	if (config > count) {
		NV_ERROR(bios->dev,
			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
			 offset, config, count);
		return len;
	}

	freq = ROM16(bios->data[offset + 12 + config * 2]);

	if (io_flag_condition_idx > 0) {
		if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) {
			BIOSLOG(bios, "0x%04X: Condition fulfilled -- "
				      "frequency doubled\n", offset);
			freq *= 2;
		} else
			BIOSLOG(bios, "0x%04X: Condition not fulfilled -- "
				      "frequency unchanged\n", offset);
	}

	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n",
		offset, reg, config, freq);

	/* table stores 10kHz units; setPLL() takes kHz */
	setPLL(bios, reg, freq * 10);

	return len;
}
1001
1002static int
1003init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1004{
1005 /*
1006 * INIT_END_REPEAT opcode: 0x36 ('6')
1007 *
1008 * offset (8 bit): opcode
1009 *
1010 * Marks the end of the block for INIT_REPEAT to repeat
1011 */
1012
1013 /* no iexec->execute check by design */
1014
1015 /*
1016 * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when
1017 * we're not in repeat mode
1018 */
1019 if (iexec->repeat)
1020 return 0;
1021
1022 return 1;
1023}
1024
1025static int
1026init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1027{
1028 /*
1029 * INIT_COPY opcode: 0x37 ('7')
1030 *
1031 * offset (8 bit): opcode
1032 * offset + 1 (32 bit): register
1033 * offset + 5 (8 bit): shift
1034 * offset + 6 (8 bit): srcmask
1035 * offset + 7 (16 bit): CRTC port
1036 * offset + 9 (8 bit): CRTC index
1037 * offset + 10 (8 bit): mask
1038 *
1039 * Read index "CRTC index" on "CRTC port", AND with "mask", OR with
1040 * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC
1041 * port
1042 */
1043
1044 uint32_t reg = ROM32(bios->data[offset + 1]);
1045 uint8_t shift = bios->data[offset + 5];
1046 uint8_t srcmask = bios->data[offset + 6];
1047 uint16_t crtcport = ROM16(bios->data[offset + 7]);
1048 uint8_t crtcindex = bios->data[offset + 9];
1049 uint8_t mask = bios->data[offset + 10];
1050 uint32_t data;
1051 uint8_t crtcdata;
1052
1053 if (!iexec->execute)
1054 return 11;
1055
1056 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
1057 "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
1058 offset, reg, shift, srcmask, crtcport, crtcindex, mask);
1059
1060 data = bios_rd32(bios, reg);
1061
1062 if (shift < 0x80)
1063 data >>= shift;
1064 else
1065 data <<= (0x100 - shift);
1066
1067 data &= srcmask;
1068
1069 crtcdata = bios_idxprt_rd(bios, crtcport, crtcindex) & mask;
1070 crtcdata |= (uint8_t)data;
1071 bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
1072
1073 return 11;
1074}
1075
1076static int
1077init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1078{
1079 /*
1080 * INIT_NOT opcode: 0x38 ('8')
1081 *
1082 * offset (8 bit): opcode
1083 *
1084 * Invert the current execute / no-execute condition (i.e. "else")
1085 */
1086 if (iexec->execute)
1087 BIOSLOG(bios, "0x%04X: ------ Skipping following commands ------\n", offset);
1088 else
1089 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
1090
1091 iexec->execute = !iexec->execute;
1092 return 1;
1093}
1094
1095static int
1096init_io_flag_condition(struct nvbios *bios, uint16_t offset,
1097 struct init_exec *iexec)
1098{
1099 /*
1100 * INIT_IO_FLAG_CONDITION opcode: 0x39 ('9')
1101 *
1102 * offset (8 bit): opcode
1103 * offset + 1 (8 bit): condition number
1104 *
1105 * Check condition "condition number" in the IO flag condition table.
1106 * If condition not met skip subsequent opcodes until condition is
1107 * inverted (INIT_NOT), or we hit INIT_RESUME
1108 */
1109
1110 uint8_t cond = bios->data[offset + 1];
1111
1112 if (!iexec->execute)
1113 return 2;
1114
1115 if (io_flag_condition_met(bios, offset, cond))
1116 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
1117 else {
1118 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
1119 iexec->execute = false;
1120 }
1121
1122 return 2;
1123}
1124
static int
init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_DP_CONDITION opcode: 0x3A ('')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (8 bit): "sub" opcode
	 * offset + 2  (8 bit): unknown
	 *
	 * Conditionally disables execution depending on DisplayPort state
	 * for the output currently being programmed (bios->display.output).
	 */

	struct dcb_entry *dcb = bios->display.output;
	struct drm_device *dev = bios->dev;
	uint8_t cond = bios->data[offset + 1];
	uint8_t *table, *entry;

	BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);

	if (!iexec->execute)
		return 3;

	/* no DP table for this output -> nothing to evaluate, skip opcode */
	table = nouveau_dp_bios_data(dev, dcb, &entry);
	if (!table)
		return 3;

	switch (cond) {
	case 0:
		/*
		 * Continue only for eDP connectors.  Note: "entry" is
		 * repurposed here to point at the connector table entry.
		 */
		entry = dcb_conn(dev, dcb->connector);
		if (!entry || entry[0] != DCB_CONNECTOR_eDP)
			iexec->execute = false;
		break;
	case 1:
	case 2:
		/*
		 * Test a capability bit in the DP table entry; the byte
		 * holding it moved between table revisions (<0x40 vs 0x40).
		 */
		if ((table[0] < 0x40 && !(entry[5] & cond)) ||
		    (table[0] == 0x40 && !(entry[4] & cond)))
			iexec->execute = false;
		break;
	case 5:
	{
		struct nouveau_i2c_chan *auxch;
		int ret;

		auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
		if (!auxch) {
			NV_ERROR(dev, "0x%04X: couldn't get auxch\n", offset);
			return 3;
		}

		/*
		 * Read one byte over DP aux from address 0xd into "cond"
		 * (reusing the variable) -- presumably a sink DPCD field;
		 * TODO confirm which register this is.
		 */
		ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
		if (ret) {
			NV_ERROR(dev, "0x%04X: auxch rd fail: %d\n", offset, ret);
			return 3;
		}

		if (!(cond & 1))
			iexec->execute = false;
	}
		break;
	default:
		NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
		break;
	}

	if (iexec->execute)
		BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
	else
		BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);

	return 3;
}
1196
1197static int
1198init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1199{
1200 /*
1201 * INIT_3B opcode: 0x3B ('')
1202 *
1203 * offset (8 bit): opcode
1204 * offset + 1 (8 bit): crtc index
1205 *
1206 */
1207
1208 uint8_t or = ffs(bios->display.output->or) - 1;
1209 uint8_t index = bios->data[offset + 1];
1210 uint8_t data;
1211
1212 if (!iexec->execute)
1213 return 2;
1214
1215 data = bios_idxprt_rd(bios, 0x3d4, index);
1216 bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
1217 return 2;
1218}
1219
1220static int
1221init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1222{
1223 /*
1224 * INIT_3C opcode: 0x3C ('')
1225 *
1226 * offset (8 bit): opcode
1227 * offset + 1 (8 bit): crtc index
1228 *
1229 */
1230
1231 uint8_t or = ffs(bios->display.output->or) - 1;
1232 uint8_t index = bios->data[offset + 1];
1233 uint8_t data;
1234
1235 if (!iexec->execute)
1236 return 2;
1237
1238 data = bios_idxprt_rd(bios, 0x3d4, index);
1239 bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
1240 return 2;
1241}
1242
1243static int
1244init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
1245 struct init_exec *iexec)
1246{
1247 /*
1248 * INIT_INDEX_ADDRESS_LATCHED opcode: 0x49 ('I')
1249 *
1250 * offset (8 bit): opcode
1251 * offset + 1 (32 bit): control register
1252 * offset + 5 (32 bit): data register
1253 * offset + 9 (32 bit): mask
1254 * offset + 13 (32 bit): data
1255 * offset + 17 (8 bit): count
1256 * offset + 18 (8 bit): address 1
1257 * offset + 19 (8 bit): data 1
1258 * ...
1259 *
1260 * For each of "count" address and data pairs, write "data n" to
1261 * "data register", read the current value of "control register",
1262 * and write it back once ANDed with "mask", ORed with "data",
1263 * and ORed with "address n"
1264 */
1265
1266 uint32_t controlreg = ROM32(bios->data[offset + 1]);
1267 uint32_t datareg = ROM32(bios->data[offset + 5]);
1268 uint32_t mask = ROM32(bios->data[offset + 9]);
1269 uint32_t data = ROM32(bios->data[offset + 13]);
1270 uint8_t count = bios->data[offset + 17];
1271 int len = 18 + count * 2;
1272 uint32_t value;
1273 int i;
1274
1275 if (!iexec->execute)
1276 return len;
1277
1278 BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
1279 "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
1280 offset, controlreg, datareg, mask, data, count);
1281
1282 for (i = 0; i < count; i++) {
1283 uint8_t instaddress = bios->data[offset + 18 + i * 2];
1284 uint8_t instdata = bios->data[offset + 19 + i * 2];
1285
1286 BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n",
1287 offset, instaddress, instdata);
1288
1289 bios_wr32(bios, datareg, instdata);
1290 value = bios_rd32(bios, controlreg) & mask;
1291 value |= data;
1292 value |= instaddress;
1293 bios_wr32(bios, controlreg, value);
1294 }
1295
1296 return len;
1297}
1298
static int
init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
		      struct init_exec *iexec)
{
	/*
	 * INIT_IO_RESTRICT_PLL2 opcode: 0x4A ('J')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (16 bit): CRTC port
	 * offset + 3  (8 bit): CRTC index
	 * offset + 4  (8 bit): mask
	 * offset + 5  (8 bit): shift
	 * offset + 6  (8 bit): count
	 * offset + 7  (32 bit): register
	 * offset + 11 (32 bit): frequency 1
	 * ...
	 *
	 * Starting at offset + 11 there are "count" 32 bit frequencies (kHz).
	 * Set PLL register "register" to coefficients for frequency n,
	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
	 * "mask" and shifted right by "shift".
	 */

	uint16_t crtcport = ROM16(bios->data[offset + 1]);
	uint8_t crtcindex = bios->data[offset + 3];
	uint8_t mask = bios->data[offset + 4];
	uint8_t shift = bios->data[offset + 5];
	uint8_t count = bios->data[offset + 6];
	uint32_t reg = ROM32(bios->data[offset + 7]);
	/* full opcode length; must be returned even when not executing */
	int len = 11 + count * 4;
	uint8_t config;
	uint32_t freq;

	if (!iexec->execute)
		return len;

	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
		      "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
		offset, crtcport, crtcindex, mask, shift, count, reg);

	/* a zero register encodes a no-op entry */
	if (!reg)
		return len;

	/* table index is derived from live hardware state */
	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
	if (config > count) {
		NV_ERROR(bios->dev,
			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
			 offset, config, count);
		return len;
	}

	freq = ROM32(bios->data[offset + 11 + config * 4]);

	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n",
		offset, reg, config, freq);

	setPLL(bios, reg, freq);

	return len;
}
1359
1360static int
1361init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1362{
1363 /*
1364 * INIT_PLL2 opcode: 0x4B ('K')
1365 *
1366 * offset (8 bit): opcode
1367 * offset + 1 (32 bit): register
1368 * offset + 5 (32 bit): freq
1369 *
1370 * Set PLL register "register" to coefficients for frequency "freq"
1371 */
1372
1373 uint32_t reg = ROM32(bios->data[offset + 1]);
1374 uint32_t freq = ROM32(bios->data[offset + 5]);
1375
1376 if (!iexec->execute)
1377 return 9;
1378
1379 BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n",
1380 offset, reg, freq);
1381
1382 setPLL(bios, reg, freq);
1383 return 9;
1384}
1385
static int
init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_I2C_BYTE opcode: 0x4C ('L')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (8 bit): DCB I2C table entry index
	 * offset + 2  (8 bit): I2C slave address
	 * offset + 3  (8 bit): count
	 * offset + 4  (8 bit): I2C register 1
	 * offset + 5  (8 bit): mask 1
	 * offset + 6  (8 bit): data 1
	 * ...
	 *
	 * For each of "count" registers given by "I2C register n" on the device
	 * addressed by "I2C slave address" on the I2C bus given by
	 * "DCB I2C table entry index", read the register, AND the result with
	 * "mask n" and OR it with "data n" before writing it back to the device
	 */

	struct drm_device *dev = bios->dev;
	uint8_t i2c_index = bios->data[offset + 1];
	/* BIOS stores the 8-bit bus address; >> 1 gives the 7-bit address */
	uint8_t i2c_address = bios->data[offset + 2] >> 1;
	uint8_t count = bios->data[offset + 3];
	struct nouveau_i2c_chan *chan;
	int len = 4 + count * 3;
	int ret, i;

	if (!iexec->execute)
		return len;

	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
		      "Count: 0x%02X\n",
		offset, i2c_index, i2c_address, count);

	chan = init_i2c_device_find(dev, i2c_index);
	if (!chan) {
		NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
		return len;
	}

	for (i = 0; i < count; i++) {
		uint8_t reg = bios->data[offset + 4 + i * 3];
		uint8_t mask = bios->data[offset + 5 + i * 3];
		uint8_t data = bios->data[offset + 6 + i * 3];
		union i2c_smbus_data val;

		ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
				     I2C_SMBUS_READ, reg,
				     I2C_SMBUS_BYTE_DATA, &val);
		if (ret < 0) {
			NV_ERROR(dev, "0x%04X: i2c rd fail: %d\n", offset, ret);
			return len;
		}

		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
			      "Mask: 0x%02X, Data: 0x%02X\n",
			offset, reg, val.byte, mask, data);

		/*
		 * bios->execute false means we're "faking" a run for
		 * logging purposes: reads above are still done, but the
		 * modifying write is suppressed.
		 */
		if (!bios->execute)
			continue;

		val.byte &= mask;
		val.byte |= data;
		ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
				     I2C_SMBUS_WRITE, reg,
				     I2C_SMBUS_BYTE_DATA, &val);
		if (ret < 0) {
			NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
			return len;
		}
	}

	return len;
}
1462
static int
init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_ZM_I2C_BYTE opcode: 0x4D ('M')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (8 bit): DCB I2C table entry index
	 * offset + 2  (8 bit): I2C slave address
	 * offset + 3  (8 bit): count
	 * offset + 4  (8 bit): I2C register 1
	 * offset + 5  (8 bit): data 1
	 * ...
	 *
	 * For each of "count" registers given by "I2C register n" on the device
	 * addressed by "I2C slave address" on the I2C bus given by
	 * "DCB I2C table entry index", set the register to "data n"
	 */

	struct drm_device *dev = bios->dev;
	uint8_t i2c_index = bios->data[offset + 1];
	/* BIOS stores the 8-bit bus address; >> 1 gives the 7-bit address */
	uint8_t i2c_address = bios->data[offset + 2] >> 1;
	uint8_t count = bios->data[offset + 3];
	struct nouveau_i2c_chan *chan;
	int len = 4 + count * 2;
	int ret, i;

	if (!iexec->execute)
		return len;

	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
		      "Count: 0x%02X\n",
		offset, i2c_index, i2c_address, count);

	chan = init_i2c_device_find(dev, i2c_index);
	if (!chan) {
		NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
		return len;
	}

	for (i = 0; i < count; i++) {
		uint8_t reg = bios->data[offset + 4 + i * 2];
		union i2c_smbus_data val;

		val.byte = bios->data[offset + 5 + i * 2];

		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
			offset, reg, val.byte);

		/* when "faking" execution, log but suppress the write */
		if (!bios->execute)
			continue;

		ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
				     I2C_SMBUS_WRITE, reg,
				     I2C_SMBUS_BYTE_DATA, &val);
		if (ret < 0) {
			NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
			return len;
		}
	}

	return len;
}
1526
static int
init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_ZM_I2C opcode: 0x4E ('N')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (8 bit): DCB I2C table entry index
	 * offset + 2  (8 bit): I2C slave address
	 * offset + 3  (8 bit): count
	 * offset + 4  (8 bit): data 1
	 * ...
	 *
	 * Send "count" bytes ("data n") to the device addressed by "I2C slave
	 * address" on the I2C bus given by "DCB I2C table entry index"
	 */

	struct drm_device *dev = bios->dev;
	uint8_t i2c_index = bios->data[offset + 1];
	/* BIOS stores the 8-bit bus address; >> 1 gives the 7-bit address */
	uint8_t i2c_address = bios->data[offset + 2] >> 1;
	uint8_t count = bios->data[offset + 3];
	int len = 4 + count;
	struct nouveau_i2c_chan *chan;
	struct i2c_msg msg;
	/* count is 8-bit, so 256 bytes always suffices */
	uint8_t data[256];
	int ret, i;

	if (!iexec->execute)
		return len;

	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
		      "Count: 0x%02X\n",
		offset, i2c_index, i2c_address, count);

	chan = init_i2c_device_find(dev, i2c_index);
	if (!chan) {
		NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
		return len;
	}

	/* copy payload out of the BIOS image, logging each byte */
	for (i = 0; i < count; i++) {
		data[i] = bios->data[offset + 4 + i];

		BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]);
	}

	/* only touch the bus when really executing (not "faking") */
	if (bios->execute) {
		msg.addr = i2c_address;
		msg.flags = 0;
		msg.len = count;
		msg.buf = data;
		ret = i2c_transfer(&chan->adapter, &msg, 1);
		if (ret != 1) {
			NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
			return len;
		}
	}

	return len;
}
1587
static int
init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_TMDS opcode: 0x4F ('O')	(non-canon name)
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (8 bit): magic lookup value
	 * offset + 2  (8 bit): TMDS address
	 * offset + 3  (8 bit): mask
	 * offset + 4  (8 bit): data
	 *
	 * Read the data reg for TMDS address "TMDS address", AND it with mask
	 * and OR it with data, then write it back
	 * "magic lookup value" determines which TMDS base address register is
	 * used -- see get_tmds_index_reg()
	 */

	struct drm_device *dev = bios->dev;
	uint8_t mlv = bios->data[offset + 1];
	uint32_t tmdsaddr = bios->data[offset + 2];
	uint8_t mask = bios->data[offset + 3];
	uint8_t data = bios->data[offset + 4];
	uint32_t reg, value;

	if (!iexec->execute)
		return 5;

	BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
		      "Mask: 0x%02X, Data: 0x%02X\n",
		offset, mlv, tmdsaddr, mask, data);

	reg = get_tmds_index_reg(bios->dev, mlv);
	if (!reg) {
		NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset);
		return 5;
	}

	/*
	 * Order matters here: select the address with writes disabled,
	 * modify the value through the data register at reg + 4, then
	 * re-select the address without the disable bit to latch it.
	 */
	bios_wr32(bios, reg,
		  tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
	value = (bios_rd32(bios, reg + 4) & mask) | data;
	bios_wr32(bios, reg + 4, value);
	bios_wr32(bios, reg, tmdsaddr);

	return 5;
}
1634
1635static int
1636init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
1637 struct init_exec *iexec)
1638{
1639 /*
1640 * INIT_ZM_TMDS_GROUP opcode: 0x50 ('P') (non-canon name)
1641 *
1642 * offset (8 bit): opcode
1643 * offset + 1 (8 bit): magic lookup value
1644 * offset + 2 (8 bit): count
1645 * offset + 3 (8 bit): addr 1
1646 * offset + 4 (8 bit): data 1
1647 * ...
1648 *
1649 * For each of "count" TMDS address and data pairs write "data n" to
1650 * "addr n". "magic lookup value" determines which TMDS base address
1651 * register is used -- see get_tmds_index_reg()
1652 */
1653
1654 struct drm_device *dev = bios->dev;
1655 uint8_t mlv = bios->data[offset + 1];
1656 uint8_t count = bios->data[offset + 2];
1657 int len = 3 + count * 2;
1658 uint32_t reg;
1659 int i;
1660
1661 if (!iexec->execute)
1662 return len;
1663
1664 BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
1665 offset, mlv, count);
1666
1667 reg = get_tmds_index_reg(bios->dev, mlv);
1668 if (!reg) {
1669 NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset);
1670 return len;
1671 }
1672
1673 for (i = 0; i < count; i++) {
1674 uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
1675 uint8_t tmdsdata = bios->data[offset + 4 + i * 2];
1676
1677 bios_wr32(bios, reg + 4, tmdsdata);
1678 bios_wr32(bios, reg, tmdsaddr);
1679 }
1680
1681 return len;
1682}
1683
1684static int
1685init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
1686 struct init_exec *iexec)
1687{
1688 /*
1689 * INIT_CR_INDEX_ADDRESS_LATCHED opcode: 0x51 ('Q')
1690 *
1691 * offset (8 bit): opcode
1692 * offset + 1 (8 bit): CRTC index1
1693 * offset + 2 (8 bit): CRTC index2
1694 * offset + 3 (8 bit): baseaddr
1695 * offset + 4 (8 bit): count
1696 * offset + 5 (8 bit): data 1
1697 * ...
1698 *
1699 * For each of "count" address and data pairs, write "baseaddr + n" to
1700 * "CRTC index1" and "data n" to "CRTC index2"
1701 * Once complete, restore initial value read from "CRTC index1"
1702 */
1703 uint8_t crtcindex1 = bios->data[offset + 1];
1704 uint8_t crtcindex2 = bios->data[offset + 2];
1705 uint8_t baseaddr = bios->data[offset + 3];
1706 uint8_t count = bios->data[offset + 4];
1707 int len = 5 + count;
1708 uint8_t oldaddr, data;
1709 int i;
1710
1711 if (!iexec->execute)
1712 return len;
1713
1714 BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
1715 "BaseAddr: 0x%02X, Count: 0x%02X\n",
1716 offset, crtcindex1, crtcindex2, baseaddr, count);
1717
1718 oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1);
1719
1720 for (i = 0; i < count; i++) {
1721 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1,
1722 baseaddr + i);
1723 data = bios->data[offset + 5 + i];
1724 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data);
1725 }
1726
1727 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
1728
1729 return len;
1730}
1731
1732static int
1733init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1734{
1735 /*
1736 * INIT_CR opcode: 0x52 ('R')
1737 *
1738 * offset (8 bit): opcode
1739 * offset + 1 (8 bit): CRTC index
1740 * offset + 2 (8 bit): mask
1741 * offset + 3 (8 bit): data
1742 *
1743 * Assign the value of at "CRTC index" ANDed with mask and ORed with
1744 * data back to "CRTC index"
1745 */
1746
1747 uint8_t crtcindex = bios->data[offset + 1];
1748 uint8_t mask = bios->data[offset + 2];
1749 uint8_t data = bios->data[offset + 3];
1750 uint8_t value;
1751
1752 if (!iexec->execute)
1753 return 4;
1754
1755 BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
1756 offset, crtcindex, mask, data);
1757
1758 value = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask;
1759 value |= data;
1760 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
1761
1762 return 4;
1763}
1764
1765static int
1766init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1767{
1768 /*
1769 * INIT_ZM_CR opcode: 0x53 ('S')
1770 *
1771 * offset (8 bit): opcode
1772 * offset + 1 (8 bit): CRTC index
1773 * offset + 2 (8 bit): value
1774 *
1775 * Assign "value" to CRTC register with index "CRTC index".
1776 */
1777
1778 uint8_t crtcindex = ROM32(bios->data[offset + 1]);
1779 uint8_t data = bios->data[offset + 2];
1780
1781 if (!iexec->execute)
1782 return 3;
1783
1784 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
1785
1786 return 3;
1787}
1788
1789static int
1790init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1791{
1792 /*
1793 * INIT_ZM_CR_GROUP opcode: 0x54 ('T')
1794 *
1795 * offset (8 bit): opcode
1796 * offset + 1 (8 bit): count
1797 * offset + 2 (8 bit): CRTC index 1
1798 * offset + 3 (8 bit): value 1
1799 * ...
1800 *
1801 * For "count", assign "value n" to CRTC register with index
1802 * "CRTC index n".
1803 */
1804
1805 uint8_t count = bios->data[offset + 1];
1806 int len = 2 + count * 2;
1807 int i;
1808
1809 if (!iexec->execute)
1810 return len;
1811
1812 for (i = 0; i < count; i++)
1813 init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
1814
1815 return len;
1816}
1817
static int
init_condition_time(struct nvbios *bios, uint16_t offset,
		    struct init_exec *iexec)
{
	/*
	 * INIT_CONDITION_TIME opcode: 0x56 ('V')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (8 bit): condition number
	 * offset + 2  (8 bit): retries / 50
	 *
	 * Check condition "condition number" in the condition table.
	 * Bios code then sleeps for 2ms if the condition is not met, and
	 * repeats up to "retries" times, but on one C51 this has proved
	 * insufficient.  In mmiotraces the driver sleeps for 20ms, so we do
	 * this, and bail after "retries" times, or 2s, whichever is less.
	 * If still not met after retries, clear execution flag for this table.
	 */

	uint8_t cond = bios->data[offset + 1];
	uint16_t retries = bios->data[offset + 2] * 50;
	unsigned cnt;

	if (!iexec->execute)
		return 3;

	/* cap at 100 iterations of 20ms = the 2s maximum described above */
	if (retries > 100)
		retries = 100;

	BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n",
		offset, cond, retries);

	if (!bios->execute) /* avoid 2s delays when "faking" execution */
		retries = 1;

	for (cnt = 0; cnt < retries; cnt++) {
		if (bios_condition_met(bios, offset, cond)) {
			BIOSLOG(bios, "0x%04X: Condition met, continuing\n",
				offset);
			break;
		} else {
			BIOSLOG(bios, "0x%04X: "
				"Condition not met, sleeping for 20ms\n",
				offset);
			mdelay(20);
		}
	}

	/* re-test: the loop may have exhausted retries without success */
	if (!bios_condition_met(bios, offset, cond)) {
		NV_WARN(bios->dev,
			"0x%04X: Condition still not met after %dms, "
			"skipping following opcodes\n", offset, 20 * retries);
		iexec->execute = false;
	}

	return 3;
}
1875
1876static int
1877init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1878{
1879 /*
1880 * INIT_LTIME opcode: 0x57 ('V')
1881 *
1882 * offset (8 bit): opcode
1883 * offset + 1 (16 bit): time
1884 *
1885 * Sleep for "time" milliseconds.
1886 */
1887
1888 unsigned time = ROM16(bios->data[offset + 1]);
1889
1890 if (!iexec->execute)
1891 return 3;
1892
1893 BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
1894 offset, time);
1895
1896 mdelay(time);
1897
1898 return 3;
1899}
1900
1901static int
1902init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
1903 struct init_exec *iexec)
1904{
1905 /*
1906 * INIT_ZM_REG_SEQUENCE opcode: 0x58 ('X')
1907 *
1908 * offset (8 bit): opcode
1909 * offset + 1 (32 bit): base register
1910 * offset + 5 (8 bit): count
1911 * offset + 6 (32 bit): value 1
1912 * ...
1913 *
1914 * Starting at offset + 6 there are "count" 32 bit values.
1915 * For "count" iterations set "base register" + 4 * current_iteration
1916 * to "value current_iteration"
1917 */
1918
1919 uint32_t basereg = ROM32(bios->data[offset + 1]);
1920 uint32_t count = bios->data[offset + 5];
1921 int len = 6 + count * 4;
1922 int i;
1923
1924 if (!iexec->execute)
1925 return len;
1926
1927 BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
1928 offset, basereg, count);
1929
1930 for (i = 0; i < count; i++) {
1931 uint32_t reg = basereg + i * 4;
1932 uint32_t data = ROM32(bios->data[offset + 6 + i * 4]);
1933
1934 bios_wr32(bios, reg, data);
1935 }
1936
1937 return len;
1938}
1939
1940static int
1941init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1942{
1943 /*
1944 * INIT_SUB_DIRECT opcode: 0x5B ('[')
1945 *
1946 * offset (8 bit): opcode
1947 * offset + 1 (16 bit): subroutine offset (in bios)
1948 *
1949 * Calls a subroutine that will execute commands until INIT_DONE
1950 * is found.
1951 */
1952
1953 uint16_t sub_offset = ROM16(bios->data[offset + 1]);
1954
1955 if (!iexec->execute)
1956 return 3;
1957
1958 BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
1959 offset, sub_offset);
1960
1961 parse_init_table(bios, sub_offset, iexec);
1962
1963 BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
1964
1965 return 3;
1966}
1967
1968static int
1969init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1970{
1971 /*
1972 * INIT_JUMP opcode: 0x5C ('\')
1973 *
1974 * offset (8 bit): opcode
1975 * offset + 1 (16 bit): offset (in bios)
1976 *
1977 * Continue execution of init table from 'offset'
1978 */
1979
1980 uint16_t jmp_offset = ROM16(bios->data[offset + 1]);
1981
1982 if (!iexec->execute)
1983 return 3;
1984
1985 BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset);
1986 return jmp_offset - offset;
1987}
1988
static int
init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_I2C_IF opcode: 0x5E ('^')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (8 bit): DCB I2C table entry index
	 * offset + 2  (8 bit): I2C slave address
	 * offset + 3  (8 bit): I2C register
	 * offset + 4  (8 bit): mask
	 * offset + 5  (8 bit): data
	 *
	 * Read the register given by "I2C register" on the device addressed
	 * by "I2C slave address" on the I2C bus given by "DCB I2C table
	 * entry index". Compare the result AND "mask" to "data".
	 * If they're not equal, skip subsequent opcodes until condition is
	 * inverted (INIT_NOT), or we hit INIT_RESUME
	 */

	uint8_t i2c_index = bios->data[offset + 1];
	/* BIOS stores the 8-bit bus address; >> 1 gives the 7-bit address */
	uint8_t i2c_address = bios->data[offset + 2] >> 1;
	uint8_t reg = bios->data[offset + 3];
	uint8_t mask = bios->data[offset + 4];
	uint8_t data = bios->data[offset + 5];
	struct nouveau_i2c_chan *chan;
	union i2c_smbus_data val;
	int ret;

	/* no execute check by design */

	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X\n",
		offset, i2c_index, i2c_address);

	/* a negative return aborts the whole init table */
	chan = init_i2c_device_find(bios->dev, i2c_index);
	if (!chan)
		return -ENODEV;

	ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
			     I2C_SMBUS_READ, reg,
			     I2C_SMBUS_BYTE_DATA, &val);
	if (ret < 0) {
		/* a missing/unresponsive device counts as condition failed */
		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: [no device], "
			      "Mask: 0x%02X, Data: 0x%02X\n",
			offset, reg, mask, data);
		iexec->execute = 0;
		return 6;
	}

	BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
		      "Mask: 0x%02X, Data: 0x%02X\n",
		offset, reg, val.byte, mask, data);

	/* unconditionally (re)sets the execute flag from the comparison */
	iexec->execute = ((val.byte & mask) == data);

	return 6;
}
2046
2047static int
2048init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2049{
2050 /*
2051 * INIT_COPY_NV_REG opcode: 0x5F ('_')
2052 *
2053 * offset (8 bit): opcode
2054 * offset + 1 (32 bit): src reg
2055 * offset + 5 (8 bit): shift
2056 * offset + 6 (32 bit): src mask
2057 * offset + 10 (32 bit): xor
2058 * offset + 14 (32 bit): dst reg
2059 * offset + 18 (32 bit): dst mask
2060 *
2061 * Shift REGVAL("src reg") right by (signed) "shift", AND result with
2062 * "src mask", then XOR with "xor". Write this OR'd with
2063 * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg"
2064 */
2065
2066 uint32_t srcreg = *((uint32_t *)(&bios->data[offset + 1]));
2067 uint8_t shift = bios->data[offset + 5];
2068 uint32_t srcmask = *((uint32_t *)(&bios->data[offset + 6]));
2069 uint32_t xor = *((uint32_t *)(&bios->data[offset + 10]));
2070 uint32_t dstreg = *((uint32_t *)(&bios->data[offset + 14]));
2071 uint32_t dstmask = *((uint32_t *)(&bios->data[offset + 18]));
2072 uint32_t srcvalue, dstvalue;
2073
2074 if (!iexec->execute)
2075 return 22;
2076
2077 BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
2078 "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
2079 offset, srcreg, shift, srcmask, xor, dstreg, dstmask);
2080
2081 srcvalue = bios_rd32(bios, srcreg);
2082
2083 if (shift < 0x80)
2084 srcvalue >>= shift;
2085 else
2086 srcvalue <<= (0x100 - shift);
2087
2088 srcvalue = (srcvalue & srcmask) ^ xor;
2089
2090 dstvalue = bios_rd32(bios, dstreg) & dstmask;
2091
2092 bios_wr32(bios, dstreg, dstvalue | srcvalue);
2093
2094 return 22;
2095}
2096
2097static int
2098init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2099{
2100 /*
2101 * INIT_ZM_INDEX_IO opcode: 0x62 ('b')
2102 *
2103 * offset (8 bit): opcode
2104 * offset + 1 (16 bit): CRTC port
2105 * offset + 3 (8 bit): CRTC index
2106 * offset + 4 (8 bit): data
2107 *
2108 * Write "data" to index "CRTC index" of "CRTC port"
2109 */
2110 uint16_t crtcport = ROM16(bios->data[offset + 1]);
2111 uint8_t crtcindex = bios->data[offset + 3];
2112 uint8_t data = bios->data[offset + 4];
2113
2114 if (!iexec->execute)
2115 return 5;
2116
2117 bios_idxprt_wr(bios, crtcport, crtcindex, data);
2118
2119 return 5;
2120}
2121
2122static inline void
2123bios_md32(struct nvbios *bios, uint32_t reg,
2124 uint32_t mask, uint32_t val)
2125{
2126 bios_wr32(bios, reg, (bios_rd32(bios, reg) & ~mask) | val);
2127}
2128
2129static uint32_t
2130peek_fb(struct drm_device *dev, struct io_mapping *fb,
2131 uint32_t off)
2132{
2133 uint32_t val = 0;
2134
2135 if (off < pci_resource_len(dev->pdev, 1)) {
2136 uint8_t __iomem *p =
2137 io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
2138
2139 val = ioread32(p + (off & ~PAGE_MASK));
2140
2141 io_mapping_unmap_atomic(p);
2142 }
2143
2144 return val;
2145}
2146
2147static void
2148poke_fb(struct drm_device *dev, struct io_mapping *fb,
2149 uint32_t off, uint32_t val)
2150{
2151 if (off < pci_resource_len(dev->pdev, 1)) {
2152 uint8_t __iomem *p =
2153 io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
2154
2155 iowrite32(val, p + (off & ~PAGE_MASK));
2156 wmb();
2157
2158 io_mapping_unmap_atomic(p);
2159 }
2160}
2161
2162static inline bool
2163read_back_fb(struct drm_device *dev, struct io_mapping *fb,
2164 uint32_t off, uint32_t val)
2165{
2166 poke_fb(dev, fb, off, val);
2167 return val == peek_fb(dev, fb, off);
2168}
2169
static int
nv04_init_compute_mem(struct nvbios *bios)
{
	/*
	 * RAM detection for NV04: program the largest possible memory
	 * configuration, then write test patterns through the BAR1
	 * framebuffer aperture and read them back to deduce the actual
	 * RAM type, bus width and amount.  The register write order here
	 * is sequence-critical; do not reorder.
	 *
	 * Returns 0 on success, -ENOMEM if the aperture can't be mapped.
	 */
	struct drm_device *dev = bios->dev;
	uint32_t patt = 0xdeadbeef;
	struct io_mapping *fb;
	int i;

	/* Map the framebuffer aperture */
	fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
				  pci_resource_len(dev->pdev, 1));
	if (!fb)
		return -ENOMEM;

	/* Sequencer and refresh off */
	NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20);
	bios_md32(bios, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);

	/* Start from the maximum config: 16MB, 128-bit, 16Mbit SGRAM */
	bios_md32(bios, NV04_PFB_BOOT_0, ~0,
		  NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
		  NV04_PFB_BOOT_0_RAM_WIDTH_128 |
		  NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);

	for (i = 0; i < 4; i++)
		poke_fb(dev, fb, 4 * i, patt);

	poke_fb(dev, fb, 0x400000, patt + 1);

	if (peek_fb(dev, fb, 0) == patt + 1) {
		/* write at 4MB aliased back to offset 0: smaller address
		 * space than assumed -> 16Mbit SDRAM config */
		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
			  NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
		bios_md32(bios, NV04_PFB_DEBUG_0,
			  NV04_PFB_DEBUG_0_REFRESH_OFF, 0);

		for (i = 0; i < 4; i++)
			poke_fb(dev, fb, 4 * i, patt);

		/* low half of the pattern lost -> drop to 8MB config */
		if ((peek_fb(dev, fb, 0xc) & 0xffff) != (patt & 0xffff))
			bios_md32(bios, NV04_PFB_BOOT_0,
				  NV04_PFB_BOOT_0_RAM_WIDTH_128 |
				  NV04_PFB_BOOT_0_RAM_AMOUNT,
				  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);

	} else if ((peek_fb(dev, fb, 0xc) & 0xffff0000) !=
					(patt & 0xffff0000)) {
		/* high half of the pattern lost -> 4MB, narrower bus */
		bios_md32(bios, NV04_PFB_BOOT_0,
			  NV04_PFB_BOOT_0_RAM_WIDTH_128 |
			  NV04_PFB_BOOT_0_RAM_AMOUNT,
			  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);

	} else if (peek_fb(dev, fb, 0) != patt) {
		/* pattern at 0 corrupted: probe 8MB vs 4MB of 8Mbit SGRAM */
		if (read_back_fb(dev, fb, 0x800000, patt))
			bios_md32(bios, NV04_PFB_BOOT_0,
				  NV04_PFB_BOOT_0_RAM_AMOUNT,
				  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
		else
			bios_md32(bios, NV04_PFB_BOOT_0,
				  NV04_PFB_BOOT_0_RAM_AMOUNT,
				  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);

		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
			  NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);

	} else if (!read_back_fb(dev, fb, 0x800000, patt)) {
		/* 8MB boundary doesn't read back: halve the amount */
		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);

	}

	/* Refresh on, sequencer on */
	bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
	NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20);

	io_mapping_free(fb);
	return 0;
}
2246
2247static const uint8_t *
2248nv05_memory_config(struct nvbios *bios)
2249{
2250 /* Defaults for BIOSes lacking a memory config table */
2251 static const uint8_t default_config_tab[][2] = {
2252 { 0x24, 0x00 },
2253 { 0x28, 0x00 },
2254 { 0x24, 0x01 },
2255 { 0x1f, 0x00 },
2256 { 0x0f, 0x00 },
2257 { 0x17, 0x00 },
2258 { 0x06, 0x00 },
2259 { 0x00, 0x00 }
2260 };
2261 int i = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) &
2262 NV_PEXTDEV_BOOT_0_RAMCFG) >> 2;
2263
2264 if (bios->legacy.mem_init_tbl_ptr)
2265 return &bios->data[bios->legacy.mem_init_tbl_ptr + 2 * i];
2266 else
2267 return default_config_tab[i];
2268}
2269
static int
nv05_init_compute_mem(struct nvbios *bios)
{
	/*
	 * RAM setup/detection for NV05: load the scrambling table and
	 * strap-based defaults, then probe bus width and memory amount
	 * with test patterns through the BAR1 aperture.  Skipped on UMA
	 * boards, which have no dedicated VRAM to probe.  Register write
	 * order is sequence-critical; do not reorder.
	 *
	 * Returns 0 on success, -ENOMEM if the aperture can't be mapped.
	 */
	struct drm_device *dev = bios->dev;
	const uint8_t *ramcfg = nv05_memory_config(bios);
	uint32_t patt = 0xdeadbeef;
	struct io_mapping *fb;
	int i, v;

	/* Map the framebuffer aperture */
	fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
				  pci_resource_len(dev->pdev, 1));
	if (!fb)
		return -ENOMEM;

	/* Sequencer off */
	NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20);

	if (bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
		goto out;

	bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);

	/* If present load the hardcoded scrambling table */
	if (bios->legacy.mem_init_tbl_ptr) {
		uint32_t *scramble_tab = (uint32_t *)&bios->data[
			bios->legacy.mem_init_tbl_ptr + 0x10];

		for (i = 0; i < 8; i++)
			bios_wr32(bios, NV04_PFB_SCRAMBLE(i),
				  ROM32(scramble_tab[i]));
	}

	/* Set memory type/width/length defaults depending on the straps */
	bios_md32(bios, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);

	if (ramcfg[1] & 0x80)
		bios_md32(bios, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);

	bios_md32(bios, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
	bios_md32(bios, NV04_PFB_CFG1, 0, 1);

	/* Probe memory bus width */
	for (i = 0; i < 4; i++)
		poke_fb(dev, fb, 4 * i, patt);

	if (peek_fb(dev, fb, 0xc) != patt)
		bios_md32(bios, NV04_PFB_BOOT_0,
			  NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);

	/* Probe memory length: step 32MB -> 16MB -> 8MB -> 4MB until a
	 * fresh pattern (++patt) reads back at the candidate boundary */
	v = bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;

	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
	    (!read_back_fb(dev, fb, 0x1000000, ++patt) ||
	     !read_back_fb(dev, fb, 0, ++patt)))
		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
			  NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);

	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
	    !read_back_fb(dev, fb, 0x800000, ++patt))
		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);

	if (!read_back_fb(dev, fb, 0x400000, ++patt))
		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
			  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);

out:
	/* Sequencer on */
	NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20);

	io_mapping_free(fb);
	return 0;
}
2345
static int
nv10_init_compute_mem(struct nvbios *bios)
{
	/*
	 * RAM probing for NV10+: try each candidate bus-width setting until
	 * a test pattern survives a write/disturb/read cycle, then check
	 * whether the top of the memory range reported by PFB_FIFO_DATA is
	 * actually backed by a RAM IC.
	 *
	 * Returns 0 on success, -ENOMEM if the aperture can't be mapped.
	 */
	struct drm_device *dev = bios->dev;
	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
	const int mem_width[] = { 0x10, 0x00, 0x20 };
	/* the third width setting is only tried on chipset >= 0x17 */
	const int mem_width_count = (dev_priv->chipset >= 0x17 ? 3 : 2);
	uint32_t patt = 0xdeadbeef;
	struct io_mapping *fb;
	int i, j, k;

	/* Map the framebuffer aperture */
	fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
				  pci_resource_len(dev->pdev, 1));
	if (!fb)
		return -ENOMEM;

	bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);

	/* Probe memory bus width */
	for (i = 0; i < mem_width_count; i++) {
		bios_md32(bios, NV04_PFB_CFG0, 0x30, mem_width[i]);

		/* up to 4 attempts per width before moving on */
		for (j = 0; j < 4; j++) {
			for (k = 0; k < 4; k++)
				poke_fb(dev, fb, 0x1c, 0);

			poke_fb(dev, fb, 0x1c, patt);
			poke_fb(dev, fb, 0x3c, 0);

			if (peek_fb(dev, fb, 0x1c) == patt)
				goto mem_width_found;
		}
	}

mem_width_found:
	patt <<= 1;

	/* Probe amount of installed memory */
	for (i = 0; i < 4; i++) {
		/* just below the top of the reported memory range */
		int off = bios_rd32(bios, NV04_PFB_FIFO_DATA) - 0x100000;

		poke_fb(dev, fb, off, patt);
		poke_fb(dev, fb, 0, 0);

		/* dummy reads, presumably to settle/flush the previous
		 * writes before the check -- TODO confirm */
		peek_fb(dev, fb, 0);
		peek_fb(dev, fb, 0);
		peek_fb(dev, fb, 0);
		peek_fb(dev, fb, 0);

		if (peek_fb(dev, fb, off) == patt)
			goto amount_found;
	}

	/* IC missing - disable the upper half memory space. */
	bios_md32(bios, NV04_PFB_CFG0, 0x1000, 0);

amount_found:
	io_mapping_free(fb);
	return 0;
}
2407
static int
nv20_init_compute_mem(struct nvbios *bios)
{
	/*
	 * RAM probing for NV20+: enable full addressing, write a marker
	 * near the top of each 32MB window, then check that the marker at
	 * the reported memory size reads back; if not, a RAM IC is missing
	 * and the upper half of the memory space is disabled again.
	 *
	 * Returns 0 on success, -ENOMEM if the aperture can't be mapped.
	 */
	struct drm_device *dev = bios->dev;
	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
	/* the addressing-control bits in PFB_CFG0 moved on nv25+ */
	uint32_t mask = (dev_priv->chipset >= 0x25 ? 0x300 : 0x900);
	uint32_t amount, off;
	struct io_mapping *fb;

	/* Map the framebuffer aperture */
	fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
				  pci_resource_len(dev->pdev, 1));
	if (!fb)
		return -ENOMEM;

	bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);

	/* Allow full addressing */
	bios_md32(bios, NV04_PFB_CFG0, 0, mask);

	/* drop a distinct marker just below each 32MB boundary */
	amount = bios_rd32(bios, NV04_PFB_FIFO_DATA);
	for (off = amount; off > 0x2000000; off -= 0x2000000)
		poke_fb(dev, fb, off - 4, off);

	/* re-read in case the poke sequence changed the reported size */
	amount = bios_rd32(bios, NV04_PFB_FIFO_DATA);
	if (amount != peek_fb(dev, fb, amount - 4))
		/* IC missing - disable the upper half memory space. */
		bios_md32(bios, NV04_PFB_CFG0, mask, 0);

	io_mapping_free(fb);
	return 0;
}
2440
2441static int
2442init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2443{
2444 /*
2445 * INIT_COMPUTE_MEM opcode: 0x63 ('c')
2446 *
2447 * offset (8 bit): opcode
2448 *
2449 * This opcode is meant to set the PFB memory config registers
2450 * appropriately so that we can correctly calculate how much VRAM it
2451 * has (on nv10 and better chipsets the amount of installed VRAM is
2452 * subsequently reported in NV_PFB_CSTATUS (0x10020C)).
2453 *
2454 * The implementation of this opcode in general consists of several
2455 * parts:
2456 *
2457 * 1) Determination of memory type and density. Only necessary for
2458 * really old chipsets, the memory type reported by the strap bits
2459 * (0x101000) is assumed to be accurate on nv05 and newer.
2460 *
2461 * 2) Determination of the memory bus width. Usually done by a cunning
2462 * combination of writes to offsets 0x1c and 0x3c in the fb, and
2463 * seeing whether the written values are read back correctly.
2464 *
2465 * Only necessary on nv0x-nv1x and nv34, on the other cards we can
2466 * trust the straps.
2467 *
2468 * 3) Determination of how many of the card's RAM pads have ICs
2469 * attached, usually done by a cunning combination of writes to an
2470 * offset slightly less than the maximum memory reported by
2471 * NV_PFB_CSTATUS, then seeing if the test pattern can be read back.
2472 *
2473 * This appears to be a NOP on IGPs and NV4x or newer chipsets, both io
2474 * logs of the VBIOS and kmmio traces of the binary driver POSTing the
2475 * card show nothing being done for this opcode. Why is it still listed
2476 * in the table?!
2477 */
2478
2479 /* no iexec->execute check by design */
2480
2481 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
2482 int ret;
2483
2484 if (dev_priv->chipset >= 0x40 ||
2485 dev_priv->chipset == 0x1a ||
2486 dev_priv->chipset == 0x1f)
2487 ret = 0;
2488 else if (dev_priv->chipset >= 0x20 &&
2489 dev_priv->chipset != 0x34)
2490 ret = nv20_init_compute_mem(bios);
2491 else if (dev_priv->chipset >= 0x10)
2492 ret = nv10_init_compute_mem(bios);
2493 else if (dev_priv->chipset >= 0x5)
2494 ret = nv05_init_compute_mem(bios);
2495 else
2496 ret = nv04_init_compute_mem(bios);
2497
2498 if (ret)
2499 return ret;
2500
2501 return 1;
2502}
2503
2504static int
2505init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2506{
2507 /*
2508 * INIT_RESET opcode: 0x65 ('e')
2509 *
2510 * offset (8 bit): opcode
2511 * offset + 1 (32 bit): register
2512 * offset + 5 (32 bit): value1
2513 * offset + 9 (32 bit): value2
2514 *
2515 * Assign "value1" to "register", then assign "value2" to "register"
2516 */
2517
2518 uint32_t reg = ROM32(bios->data[offset + 1]);
2519 uint32_t value1 = ROM32(bios->data[offset + 5]);
2520 uint32_t value2 = ROM32(bios->data[offset + 9]);
2521 uint32_t pci_nv_19, pci_nv_20;
2522
2523 /* no iexec->execute check by design */
2524
2525 pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
2526 bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19 & ~0xf00);
2527
2528 bios_wr32(bios, reg, value1);
2529
2530 udelay(10);
2531
2532 bios_wr32(bios, reg, value2);
2533 bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19);
2534
2535 pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20);
2536 pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */
2537 bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
2538
2539 return 13;
2540}
2541
static int
init_configure_mem(struct nvbios *bios, uint16_t offset,
		   struct init_exec *iexec)
{
	/*
	 * INIT_CONFIGURE_MEM opcode: 0x66 ('f')
	 *
	 * offset (8 bit): opcode
	 *
	 * Equivalent to INIT_DONE on bios version 3 or greater.
	 * For early bios versions, sets up the memory registers, using values
	 * taken from the memory init table
	 */

	/* no iexec->execute check by design */

	/* the CR3C scratch value (latched from the straps by
	 * INIT_CONFIGURE_PREINIT) selects the memory init table entry */
	uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
	uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr, meminitdata = meminitoffs + 6;
	uint32_t reg, data;

	/* returning 0 terminates the init table -- the "INIT_DONE" case */
	if (bios->major_version > 2)
		return 0;

	/* set bit 0x20 in the VGA sequencer clocking mode register */
	bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
		       bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);

	/* bit 0 of the entry's first byte selects the DDR sequence table
	 * instead of the SDR one */
	if (bios->data[meminitoffs] & 1)
		seqtbloffs = bios->legacy.ddr_seq_tbl_ptr;

	/* walk the 0xffffffff-terminated register list: the three PFB
	 * command registers get fixed command values, everything else
	 * consumes the next dword of init data (0xffffffff = skip) */
	for (reg = ROM32(bios->data[seqtbloffs]);
	     reg != 0xffffffff;
	     reg = ROM32(bios->data[seqtbloffs += 4])) {

		switch (reg) {
		case NV04_PFB_PRE:
			data = NV04_PFB_PRE_CMD_PRECHARGE;
			break;
		case NV04_PFB_PAD:
			data = NV04_PFB_PAD_CKE_NORMAL;
			break;
		case NV04_PFB_REF:
			data = NV04_PFB_REF_CMD_REFRESH;
			break;
		default:
			data = ROM32(bios->data[meminitdata]);
			meminitdata += 4;
			if (data == 0xffffffff)
				continue;
		}

		bios_wr32(bios, reg, data);
	}

	return 1;
}
2597
2598static int
2599init_configure_clk(struct nvbios *bios, uint16_t offset,
2600 struct init_exec *iexec)
2601{
2602 /*
2603 * INIT_CONFIGURE_CLK opcode: 0x67 ('g')
2604 *
2605 * offset (8 bit): opcode
2606 *
2607 * Equivalent to INIT_DONE on bios version 3 or greater.
2608 * For early bios versions, sets up the NVClk and MClk PLLs, using
2609 * values taken from the memory init table
2610 */
2611
2612 /* no iexec->execute check by design */
2613
2614 uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
2615 int clock;
2616
2617 if (bios->major_version > 2)
2618 return 0;
2619
2620 clock = ROM16(bios->data[meminitoffs + 4]) * 10;
2621 setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
2622
2623 clock = ROM16(bios->data[meminitoffs + 2]) * 10;
2624 if (bios->data[meminitoffs] & 1) /* DDR */
2625 clock *= 2;
2626 setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
2627
2628 return 1;
2629}
2630
2631static int
2632init_configure_preinit(struct nvbios *bios, uint16_t offset,
2633 struct init_exec *iexec)
2634{
2635 /*
2636 * INIT_CONFIGURE_PREINIT opcode: 0x68 ('h')
2637 *
2638 * offset (8 bit): opcode
2639 *
2640 * Equivalent to INIT_DONE on bios version 3 or greater.
2641 * For early bios versions, does early init, loading ram and crystal
2642 * configuration from straps into CR3C
2643 */
2644
2645 /* no iexec->execute check by design */
2646
2647 uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
2648 uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & 0x40) >> 6;
2649
2650 if (bios->major_version > 2)
2651 return 0;
2652
2653 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
2654 NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
2655
2656 return 1;
2657}
2658
static int
init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_IO opcode: 0x69 ('i')
	 *
	 * offset (8 bit): opcode
	 * offset + 1 (16 bit): CRTC port
	 * offset + 3 (8 bit): mask
	 * offset + 4 (8 bit): data
	 *
	 * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port"
	 *
	 * Consumes 5 bytes on every path.
	 */

	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
	uint16_t crtcport = ROM16(bios->data[offset + 1]);
	uint8_t mask = bios->data[offset + 3];
	uint8_t data = bios->data[offset + 4];

	if (!iexec->execute)
		return 5;

	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
		offset, crtcport, mask, data);

	/*
	 * I have no idea what this does, but NVIDIA do this magic sequence
	 * in the places where this INIT_IO happens..
	 *
	 * The register sequence below (replicated from mmio traces of the
	 * binary driver) replaces the port write entirely on NV50+ when
	 * port 0x3c3 is written with data 1; the ordering and the mdelay()s
	 * are part of the traced sequence and must not be changed.
	 */
	if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) {
		int i;

		bios_wr32(bios, 0x614100, (bios_rd32(
			  bios, 0x614100) & 0x0fffffff) | 0x00800000);

		bios_wr32(bios, 0x00e18c, bios_rd32(
			  bios, 0x00e18c) | 0x00020000);

		bios_wr32(bios, 0x614900, (bios_rd32(
			  bios, 0x614900) & 0x0fffffff) | 0x00800000);

		bios_wr32(bios, 0x000200, bios_rd32(
			  bios, 0x000200) & ~0x40000000);

		mdelay(10);

		bios_wr32(bios, 0x00e18c, bios_rd32(
			  bios, 0x00e18c) & ~0x00020000);

		bios_wr32(bios, 0x000200, bios_rd32(
			  bios, 0x000200) | 0x40000000);

		bios_wr32(bios, 0x614100, 0x00800018);
		bios_wr32(bios, 0x614900, 0x00800018);

		mdelay(10);

		bios_wr32(bios, 0x614100, 0x10000018);
		bios_wr32(bios, 0x614900, 0x10000018);

		/* mask off the low nibbles of several 0x6142xx/0x6143xx
		 * register groups, per the traced sequence */
		for (i = 0; i < 3; i++)
			bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32(
				  bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0);

		for (i = 0; i < 2; i++)
			bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32(
				  bios, 0x614300 + (i*0x800)) & 0xfffff0f0);

		for (i = 0; i < 3; i++)
			bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32(
				  bios, 0x614380 + (i*0x800)) & 0xfffff0f0);

		for (i = 0; i < 2; i++)
			bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32(
				  bios, 0x614200 + (i*0x800)) & 0xfffffff0);

		for (i = 0; i < 2; i++)
			bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
				  bios, 0x614108 + (i*0x800)) & 0x0fffffff);
		return 5;
	}

	bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
		     data);
	return 5;
}
2745
2746static int
2747init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2748{
2749 /*
2750 * INIT_SUB opcode: 0x6B ('k')
2751 *
2752 * offset (8 bit): opcode
2753 * offset + 1 (8 bit): script number
2754 *
2755 * Execute script number "script number", as a subroutine
2756 */
2757
2758 uint8_t sub = bios->data[offset + 1];
2759
2760 if (!iexec->execute)
2761 return 2;
2762
2763 BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
2764
2765 parse_init_table(bios,
2766 ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]),
2767 iexec);
2768
2769 BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
2770
2771 return 2;
2772}
2773
2774static int
2775init_ram_condition(struct nvbios *bios, uint16_t offset,
2776 struct init_exec *iexec)
2777{
2778 /*
2779 * INIT_RAM_CONDITION opcode: 0x6D ('m')
2780 *
2781 * offset (8 bit): opcode
2782 * offset + 1 (8 bit): mask
2783 * offset + 2 (8 bit): cmpval
2784 *
2785 * Test if (NV04_PFB_BOOT_0 & "mask") equals "cmpval".
2786 * If condition not met skip subsequent opcodes until condition is
2787 * inverted (INIT_NOT), or we hit INIT_RESUME
2788 */
2789
2790 uint8_t mask = bios->data[offset + 1];
2791 uint8_t cmpval = bios->data[offset + 2];
2792 uint8_t data;
2793
2794 if (!iexec->execute)
2795 return 3;
2796
2797 data = bios_rd32(bios, NV04_PFB_BOOT_0) & mask;
2798
2799 BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
2800 offset, data, cmpval);
2801
2802 if (data == cmpval)
2803 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2804 else {
2805 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2806 iexec->execute = false;
2807 }
2808
2809 return 3;
2810}
2811
2812static int
2813init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2814{
2815 /*
2816 * INIT_NV_REG opcode: 0x6E ('n')
2817 *
2818 * offset (8 bit): opcode
2819 * offset + 1 (32 bit): register
2820 * offset + 5 (32 bit): mask
2821 * offset + 9 (32 bit): data
2822 *
2823 * Assign ((REGVAL("register") & "mask") | "data") to "register"
2824 */
2825
2826 uint32_t reg = ROM32(bios->data[offset + 1]);
2827 uint32_t mask = ROM32(bios->data[offset + 5]);
2828 uint32_t data = ROM32(bios->data[offset + 9]);
2829
2830 if (!iexec->execute)
2831 return 13;
2832
2833 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
2834 offset, reg, mask, data);
2835
2836 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
2837
2838 return 13;
2839}
2840
2841static int
2842init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2843{
2844 /*
2845 * INIT_MACRO opcode: 0x6F ('o')
2846 *
2847 * offset (8 bit): opcode
2848 * offset + 1 (8 bit): macro number
2849 *
2850 * Look up macro index "macro number" in the macro index table.
2851 * The macro index table entry has 1 byte for the index in the macro
2852 * table, and 1 byte for the number of times to repeat the macro.
2853 * The macro table entry has 4 bytes for the register address and
2854 * 4 bytes for the value to write to that register
2855 */
2856
2857 uint8_t macro_index_tbl_idx = bios->data[offset + 1];
2858 uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE);
2859 uint8_t macro_tbl_idx = bios->data[tmp];
2860 uint8_t count = bios->data[tmp + 1];
2861 uint32_t reg, data;
2862 int i;
2863
2864 if (!iexec->execute)
2865 return 2;
2866
2867 BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
2868 "Count: 0x%02X\n",
2869 offset, macro_index_tbl_idx, macro_tbl_idx, count);
2870
2871 for (i = 0; i < count; i++) {
2872 uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE;
2873
2874 reg = ROM32(bios->data[macroentryptr]);
2875 data = ROM32(bios->data[macroentryptr + 4]);
2876
2877 bios_wr32(bios, reg, data);
2878 }
2879
2880 return 2;
2881}
2882
2883static int
2884init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2885{
2886 /*
2887 * INIT_DONE opcode: 0x71 ('q')
2888 *
2889 * offset (8 bit): opcode
2890 *
2891 * End the current script
2892 */
2893
2894 /* mild retval abuse to stop parsing this table */
2895 return 0;
2896}
2897
2898static int
2899init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2900{
2901 /*
2902 * INIT_RESUME opcode: 0x72 ('r')
2903 *
2904 * offset (8 bit): opcode
2905 *
2906 * End the current execute / no-execute condition
2907 */
2908
2909 if (iexec->execute)
2910 return 1;
2911
2912 iexec->execute = true;
2913 BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
2914
2915 return 1;
2916}
2917
2918static int
2919init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2920{
2921 /*
2922 * INIT_TIME opcode: 0x74 ('t')
2923 *
2924 * offset (8 bit): opcode
2925 * offset + 1 (16 bit): time
2926 *
2927 * Sleep for "time" microseconds.
2928 */
2929
2930 unsigned time = ROM16(bios->data[offset + 1]);
2931
2932 if (!iexec->execute)
2933 return 3;
2934
2935 BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
2936 offset, time);
2937
2938 if (time < 1000)
2939 udelay(time);
2940 else
2941 mdelay((time + 900) / 1000);
2942
2943 return 3;
2944}
2945
2946static int
2947init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2948{
2949 /*
2950 * INIT_CONDITION opcode: 0x75 ('u')
2951 *
2952 * offset (8 bit): opcode
2953 * offset + 1 (8 bit): condition number
2954 *
2955 * Check condition "condition number" in the condition table.
2956 * If condition not met skip subsequent opcodes until condition is
2957 * inverted (INIT_NOT), or we hit INIT_RESUME
2958 */
2959
2960 uint8_t cond = bios->data[offset + 1];
2961
2962 if (!iexec->execute)
2963 return 2;
2964
2965 BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
2966
2967 if (bios_condition_met(bios, offset, cond))
2968 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2969 else {
2970 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2971 iexec->execute = false;
2972 }
2973
2974 return 2;
2975}
2976
2977static int
2978init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2979{
2980 /*
2981 * INIT_IO_CONDITION opcode: 0x76
2982 *
2983 * offset (8 bit): opcode
2984 * offset + 1 (8 bit): condition number
2985 *
2986 * Check condition "condition number" in the io condition table.
2987 * If condition not met skip subsequent opcodes until condition is
2988 * inverted (INIT_NOT), or we hit INIT_RESUME
2989 */
2990
2991 uint8_t cond = bios->data[offset + 1];
2992
2993 if (!iexec->execute)
2994 return 2;
2995
2996 BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
2997
2998 if (io_condition_met(bios, offset, cond))
2999 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
3000 else {
3001 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
3002 iexec->execute = false;
3003 }
3004
3005 return 2;
3006}
3007
3008static int
3009init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3010{
3011 /*
3012 * INIT_INDEX_IO opcode: 0x78 ('x')
3013 *
3014 * offset (8 bit): opcode
3015 * offset + 1 (16 bit): CRTC port
3016 * offset + 3 (8 bit): CRTC index
3017 * offset + 4 (8 bit): mask
3018 * offset + 5 (8 bit): data
3019 *
3020 * Read value at index "CRTC index" on "CRTC port", AND with "mask",
3021 * OR with "data", write-back
3022 */
3023
3024 uint16_t crtcport = ROM16(bios->data[offset + 1]);
3025 uint8_t crtcindex = bios->data[offset + 3];
3026 uint8_t mask = bios->data[offset + 4];
3027 uint8_t data = bios->data[offset + 5];
3028 uint8_t value;
3029
3030 if (!iexec->execute)
3031 return 6;
3032
3033 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
3034 "Data: 0x%02X\n",
3035 offset, crtcport, crtcindex, mask, data);
3036
3037 value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
3038 bios_idxprt_wr(bios, crtcport, crtcindex, value);
3039
3040 return 6;
3041}
3042
3043static int
3044init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3045{
3046 /*
3047 * INIT_PLL opcode: 0x79 ('y')
3048 *
3049 * offset (8 bit): opcode
3050 * offset + 1 (32 bit): register
3051 * offset + 5 (16 bit): freq
3052 *
3053 * Set PLL register "register" to coefficients for frequency (10kHz)
3054 * "freq"
3055 */
3056
3057 uint32_t reg = ROM32(bios->data[offset + 1]);
3058 uint16_t freq = ROM16(bios->data[offset + 5]);
3059
3060 if (!iexec->execute)
3061 return 7;
3062
3063 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
3064
3065 setPLL(bios, reg, freq * 10);
3066
3067 return 7;
3068}
3069
3070static int
3071init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3072{
3073 /*
3074 * INIT_ZM_REG opcode: 0x7A ('z')
3075 *
3076 * offset (8 bit): opcode
3077 * offset + 1 (32 bit): register
3078 * offset + 5 (32 bit): value
3079 *
3080 * Assign "value" to "register"
3081 */
3082
3083 uint32_t reg = ROM32(bios->data[offset + 1]);
3084 uint32_t value = ROM32(bios->data[offset + 5]);
3085
3086 if (!iexec->execute)
3087 return 9;
3088
3089 if (reg == 0x000200)
3090 value |= 1;
3091
3092 bios_wr32(bios, reg, value);
3093
3094 return 9;
3095}
3096
3097static int
3098init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
3099 struct init_exec *iexec)
3100{
3101 /*
3102 * INIT_RAM_RESTRICT_PLL opcode: 0x87 ('')
3103 *
3104 * offset (8 bit): opcode
3105 * offset + 1 (8 bit): PLL type
3106 * offset + 2 (32 bit): frequency 0
3107 *
3108 * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
3109 * ram_restrict_table_ptr. The value read from there is used to select
3110 * a frequency from the table starting at 'frequency 0' to be
3111 * programmed into the PLL corresponding to 'type'.
3112 *
3113 * The PLL limits table on cards using this opcode has a mapping of
3114 * 'type' to the relevant registers.
3115 */
3116
3117 struct drm_device *dev = bios->dev;
3118 uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
3119 uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap];
3120 uint8_t type = bios->data[offset + 1];
3121 uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
3122 uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
3123 int len = 2 + bios->ram_restrict_group_count * 4;
3124 int i;
3125
3126 if (!iexec->execute)
3127 return len;
3128
3129 if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
3130 NV_ERROR(dev, "PLL limits table not version 3.x\n");
3131 return len; /* deliberate, allow default clocks to remain */
3132 }
3133
3134 entry = pll_limits + pll_limits[1];
3135 for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) {
3136 if (entry[0] == type) {
3137 uint32_t reg = ROM32(entry[3]);
3138
3139 BIOSLOG(bios, "0x%04X: "
3140 "Type %02x Reg 0x%08x Freq %dKHz\n",
3141 offset, type, reg, freq);
3142
3143 setPLL(bios, reg, freq);
3144 return len;
3145 }
3146 }
3147
3148 NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type);
3149 return len;
3150}
3151
static int
init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_8C   opcode: 0x8C ('')
	 *
	 * NOP so far....
	 *
	 */

	/* opcode has no known operands; the parser just advances one byte */
	return 1;
}
3164
static int
init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_8D   opcode: 0x8D ('')
	 *
	 * NOP so far....
	 *
	 */

	/* opcode has no known operands; the parser just advances one byte */
	return 1;
}
3177
static int
init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_GPIO   opcode: 0x8E ('')
	 *
	 * offset      (8 bit): opcode
	 *
	 * Loop over all entries in the DCB GPIO table, and initialise
	 * each GPIO according to various values listed in each entry
	 */

	/*
	 * Only touch the hardware when both the script is in EXECUTE state
	 * and the bios itself is allowed to write registers; the actual
	 * GPIO programming is delegated to the gpio subsystem.
	 */
	if (iexec->execute && bios->execute)
		nouveau_gpio_reset(bios->dev);

	return 1;
}
3195
3196static int
3197init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
3198 struct init_exec *iexec)
3199{
3200 /*
3201 * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode: 0x8F ('')
3202 *
3203 * offset (8 bit): opcode
3204 * offset + 1 (32 bit): reg
3205 * offset + 5 (8 bit): regincrement
3206 * offset + 6 (8 bit): count
3207 * offset + 7 (32 bit): value 1,1
3208 * ...
3209 *
3210 * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
3211 * ram_restrict_table_ptr. The value read from here is 'n', and
3212 * "value 1,n" gets written to "reg". This repeats "count" times and on
3213 * each iteration 'm', "reg" increases by "regincrement" and
3214 * "value m,n" is used. The extent of n is limited by a number read
3215 * from the 'M' BIT table, herein called "blocklen"
3216 */
3217
3218 uint32_t reg = ROM32(bios->data[offset + 1]);
3219 uint8_t regincrement = bios->data[offset + 5];
3220 uint8_t count = bios->data[offset + 6];
3221 uint32_t strap_ramcfg, data;
3222 /* previously set by 'M' BIT table */
3223 uint16_t blocklen = bios->ram_restrict_group_count * 4;
3224 int len = 7 + count * blocklen;
3225 uint8_t index;
3226 int i;
3227
3228 /* critical! to know the length of the opcode */;
3229 if (!blocklen) {
3230 NV_ERROR(bios->dev,
3231 "0x%04X: Zero block length - has the M table "
3232 "been parsed?\n", offset);
3233 return -EINVAL;
3234 }
3235
3236 if (!iexec->execute)
3237 return len;
3238
3239 strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
3240 index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];
3241
3242 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, "
3243 "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n",
3244 offset, reg, regincrement, count, strap_ramcfg, index);
3245
3246 for (i = 0; i < count; i++) {
3247 data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]);
3248
3249 bios_wr32(bios, reg, data);
3250
3251 reg += regincrement;
3252 }
3253
3254 return len;
3255}
3256
3257static int
3258init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3259{
3260 /*
3261 * INIT_COPY_ZM_REG opcode: 0x90 ('')
3262 *
3263 * offset (8 bit): opcode
3264 * offset + 1 (32 bit): src reg
3265 * offset + 5 (32 bit): dst reg
3266 *
3267 * Put contents of "src reg" into "dst reg"
3268 */
3269
3270 uint32_t srcreg = ROM32(bios->data[offset + 1]);
3271 uint32_t dstreg = ROM32(bios->data[offset + 5]);
3272
3273 if (!iexec->execute)
3274 return 9;
3275
3276 bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
3277
3278 return 9;
3279}
3280
3281static int
3282init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
3283 struct init_exec *iexec)
3284{
3285 /*
3286 * INIT_ZM_REG_GROUP_ADDRESS_LATCHED opcode: 0x91 ('')
3287 *
3288 * offset (8 bit): opcode
3289 * offset + 1 (32 bit): dst reg
3290 * offset + 5 (8 bit): count
3291 * offset + 6 (32 bit): data 1
3292 * ...
3293 *
3294 * For each of "count" values write "data n" to "dst reg"
3295 */
3296
3297 uint32_t reg = ROM32(bios->data[offset + 1]);
3298 uint8_t count = bios->data[offset + 5];
3299 int len = 6 + count * 4;
3300 int i;
3301
3302 if (!iexec->execute)
3303 return len;
3304
3305 for (i = 0; i < count; i++) {
3306 uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
3307 bios_wr32(bios, reg, data);
3308 }
3309
3310 return len;
3311}
3312
static int
init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_RESERVED   opcode: 0x92 ('')
	 *
	 * offset      (8 bit): opcode
	 *
	 * Seemingly does nothing
	 */

	/* single-byte opcode; parser advances past it */
	return 1;
}
3326
static int
init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_96   opcode: 0x96 ('')
	 *
	 * offset (8 bit): opcode
	 * offset + 1 (32 bit): sreg
	 * offset + 5 (8 bit): sshift
	 * offset + 6 (8 bit): smask
	 * offset + 7 (8 bit): index
	 * offset + 8 (32 bit): reg
	 * offset + 12 (32 bit): mask
	 * offset + 16 (8 bit): shift
	 *
	 * Reads "sreg", shifts it by "sshift" (values >= 0x80 encode a left
	 * shift of (0x100 - sshift) instead of a right shift) and masks with
	 * "smask" to produce a lookup value.  That value indexes the
	 * translation table selected by "index" (via init96_tbl_ptr), and the
	 * byte found there, shifted left by "shift", is merged into "reg"
	 * under "mask".
	 */

	/* each xlat table pointer is a 16-bit entry in the init96 table */
	uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2);
	uint32_t reg = ROM32(bios->data[offset + 8]);
	uint32_t mask = ROM32(bios->data[offset + 12]);
	uint32_t val;

	val = bios_rd32(bios, ROM32(bios->data[offset + 1]));
	/* sshift < 0x80 means shift right; otherwise shift left by 0x100-n */
	if (bios->data[offset + 5] < 0x80)
		val >>= bios->data[offset + 5];
	else
		val <<= (0x100 - bios->data[offset + 5]);
	val &= bios->data[offset + 6];

	/* translate through the xlat table, then position under the mask */
	val = bios->data[ROM16(bios->data[xlatptr]) + val];
	val <<= bios->data[offset + 16];

	if (!iexec->execute)
		return 17;

	/* read-modify-write: keep bits covered by mask, OR in the new value */
	bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
	return 17;
}
3365
3366static int
3367init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3368{
3369 /*
3370 * INIT_97 opcode: 0x97 ('')
3371 *
3372 * offset (8 bit): opcode
3373 * offset + 1 (32 bit): register
3374 * offset + 5 (32 bit): mask
3375 * offset + 9 (32 bit): value
3376 *
3377 * Adds "value" to "register" preserving the fields specified
3378 * by "mask"
3379 */
3380
3381 uint32_t reg = ROM32(bios->data[offset + 1]);
3382 uint32_t mask = ROM32(bios->data[offset + 5]);
3383 uint32_t add = ROM32(bios->data[offset + 9]);
3384 uint32_t val;
3385
3386 val = bios_rd32(bios, reg);
3387 val = (val & mask) | ((val + add) & ~mask);
3388
3389 if (!iexec->execute)
3390 return 13;
3391
3392 bios_wr32(bios, reg, val);
3393 return 13;
3394}
3395
static int
init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_AUXCH   opcode: 0x98 ('')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1 (32 bit): address
	 * offset + 5  (8 bit): count
	 * offset + 6  (8 bit): mask 0
	 * offset + 7  (8 bit): data 0
	 * ...
	 *
	 * For each of "count" (mask, data) pairs: read a byte from "address"
	 * over the current output's DP AUX channel, AND it with "mask",
	 * OR in "data", and write it back.
	 */

	struct drm_device *dev = bios->dev;
	struct nouveau_i2c_chan *auxch;
	uint32_t addr = ROM32(bios->data[offset + 1]);
	uint8_t count = bios->data[offset + 5];
	int len = 6 + count * 2;
	int ret, i;

	/* this opcode is only meaningful while running an output script */
	if (!bios->display.output) {
		NV_ERROR(dev, "INIT_AUXCH: no active output\n");
		return len;
	}

	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
	if (!auxch) {
		NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
			 bios->display.output->i2c_index);
		return len;
	}

	/* output/auxch checks happen even in SKIP state; the I/O does not */
	if (!iexec->execute)
		return len;

	offset += 6;
	for (i = 0; i < count; i++, offset += 2) {
		uint8_t data;

		/* cmd 9: AUX read of one byte at addr */
		ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
		if (ret) {
			NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
			return len;
		}

		data &= bios->data[offset + 0];
		data |= bios->data[offset + 1];

		/* cmd 8: AUX write of the modified byte back to addr */
		ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
		if (ret) {
			NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
			return len;
		}
	}

	return len;
}
3455
static int
init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_ZM_AUXCH   opcode: 0x99 ('')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1 (32 bit): address
	 * offset + 5  (8 bit): count
	 * offset + 6  (8 bit): data 0
	 * ...
	 *
	 * Write "count" data bytes, one at a time, to "address" over the
	 * current output's DP AUX channel (ZM = "zero mask": no read-back,
	 * unlike INIT_AUXCH).
	 */

	struct drm_device *dev = bios->dev;
	struct nouveau_i2c_chan *auxch;
	uint32_t addr = ROM32(bios->data[offset + 1]);
	uint8_t count = bios->data[offset + 5];
	int len = 6 + count;
	int ret, i;

	/* this opcode is only meaningful while running an output script */
	if (!bios->display.output) {
		NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
		return len;
	}

	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
	if (!auxch) {
		NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
			 bios->display.output->i2c_index);
		return len;
	}

	/* output/auxch checks happen even in SKIP state; the I/O does not */
	if (!iexec->execute)
		return len;

	offset += 6;
	for (i = 0; i < count; i++, offset++) {
		/* cmd 8: AUX write of one byte straight from the BIOS image */
		ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
		if (ret) {
			NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
			return len;
		}
	}

	return len;
}
3503
static int
init_i2c_long_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * INIT_I2C_LONG_IF   opcode: 0x9A ('')
	 *
	 * offset      (8 bit): opcode
	 * offset + 1  (8 bit): DCB I2C table entry index
	 * offset + 2  (8 bit): I2C slave address
	 * offset + 3 (16 bit): I2C register
	 * offset + 5  (8 bit): mask
	 * offset + 6  (8 bit): data
	 *
	 * Read the register given by "I2C register" on the device addressed
	 * by "I2C slave address" on the I2C bus given by "DCB I2C table
	 * entry index". Compare the result AND "mask" to "data".
	 * If they're not equal, skip subsequent opcodes until condition is
	 * inverted (INIT_NOT), or we hit INIT_RESUME
	 */

	uint8_t i2c_index = bios->data[offset + 1];
	/* table stores the 8-bit (shifted) form; convert to 7-bit address */
	uint8_t i2c_address = bios->data[offset + 2] >> 1;
	uint8_t reglo = bios->data[offset + 3];
	uint8_t reghi = bios->data[offset + 4];
	uint8_t mask = bios->data[offset + 5];
	uint8_t data = bios->data[offset + 6];
	struct nouveau_i2c_chan *chan;
	uint8_t buf0[2] = { reghi, reglo };
	uint8_t buf1[1];
	/*
	 * NOTE(review): msg[0].len is 1, so only reghi is actually sent even
	 * though buf0 holds both halves of the 16-bit register address --
	 * confirm whether this should be 2 before relying on this opcode.
	 */
	struct i2c_msg msg[2] = {
		{ i2c_address, 0, 1, buf0 },
		{ i2c_address, I2C_M_RD, 1, buf1 },
	};
	int ret;

	/* no execute check by design */

	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X\n",
		offset, i2c_index, i2c_address);

	chan = init_i2c_device_find(bios->dev, i2c_index);
	if (!chan)
		return -ENODEV;


	ret = i2c_transfer(&chan->adapter, msg, 2);
	if (ret < 0) {
		/* no device answered: treat the condition as false */
		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X:0x%02X, Value: [no device], "
			      "Mask: 0x%02X, Data: 0x%02X\n",
			offset, reghi, reglo, mask, data);
		iexec->execute = 0;
		return 7;
	}

	BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X:0x%02X, Value: 0x%02X, "
		      "Mask: 0x%02X, Data: 0x%02X\n",
		offset, reghi, reglo, buf1[0], mask, data);

	/* condition result becomes the new EXECUTE/SKIP state */
	iexec->execute = ((buf1[0] & mask) == data);

	return 7;
}
3566
/*
 * Dispatch table mapping VBIOS init-script opcode ids to their handlers.
 * parse_init_table() scans it linearly; the NULL-named entry terminates it.
 */
static struct init_tbl_entry itbl_entry[] = {
	/* command name , id , command handler */
	/* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
	{ "INIT_IO_RESTRICT_PROG"             , 0x32, init_io_restrict_prog          },
	{ "INIT_REPEAT"                       , 0x33, init_repeat                    },
	{ "INIT_IO_RESTRICT_PLL"              , 0x34, init_io_restrict_pll           },
	{ "INIT_END_REPEAT"                   , 0x36, init_end_repeat                },
	{ "INIT_COPY"                         , 0x37, init_copy                      },
	{ "INIT_NOT"                          , 0x38, init_not                       },
	{ "INIT_IO_FLAG_CONDITION"            , 0x39, init_io_flag_condition         },
	{ "INIT_DP_CONDITION"                 , 0x3A, init_dp_condition              },
	{ "INIT_OP_3B"                        , 0x3B, init_op_3b                     },
	{ "INIT_OP_3C"                        , 0x3C, init_op_3c                     },
	{ "INIT_INDEX_ADDRESS_LATCHED"        , 0x49, init_idx_addr_latched          },
	{ "INIT_IO_RESTRICT_PLL2"             , 0x4A, init_io_restrict_pll2          },
	{ "INIT_PLL2"                         , 0x4B, init_pll2                      },
	{ "INIT_I2C_BYTE"                     , 0x4C, init_i2c_byte                  },
	{ "INIT_ZM_I2C_BYTE"                  , 0x4D, init_zm_i2c_byte               },
	{ "INIT_ZM_I2C"                       , 0x4E, init_zm_i2c                    },
	{ "INIT_TMDS"                         , 0x4F, init_tmds                      },
	{ "INIT_ZM_TMDS_GROUP"                , 0x50, init_zm_tmds_group             },
	{ "INIT_CR_INDEX_ADDRESS_LATCHED"     , 0x51, init_cr_idx_adr_latch          },
	{ "INIT_CR"                           , 0x52, init_cr                        },
	{ "INIT_ZM_CR"                        , 0x53, init_zm_cr                     },
	{ "INIT_ZM_CR_GROUP"                  , 0x54, init_zm_cr_group               },
	{ "INIT_CONDITION_TIME"               , 0x56, init_condition_time            },
	{ "INIT_LTIME"                        , 0x57, init_ltime                     },
	{ "INIT_ZM_REG_SEQUENCE"              , 0x58, init_zm_reg_sequence           },
	/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
	{ "INIT_SUB_DIRECT"                   , 0x5B, init_sub_direct                },
	{ "INIT_JUMP"                         , 0x5C, init_jump                      },
	{ "INIT_I2C_IF"                       , 0x5E, init_i2c_if                    },
	{ "INIT_COPY_NV_REG"                  , 0x5F, init_copy_nv_reg               },
	{ "INIT_ZM_INDEX_IO"                  , 0x62, init_zm_index_io               },
	{ "INIT_COMPUTE_MEM"                  , 0x63, init_compute_mem               },
	{ "INIT_RESET"                        , 0x65, init_reset                     },
	{ "INIT_CONFIGURE_MEM"                , 0x66, init_configure_mem             },
	{ "INIT_CONFIGURE_CLK"                , 0x67, init_configure_clk             },
	{ "INIT_CONFIGURE_PREINIT"            , 0x68, init_configure_preinit         },
	{ "INIT_IO"                           , 0x69, init_io                        },
	{ "INIT_SUB"                          , 0x6B, init_sub                       },
	{ "INIT_RAM_CONDITION"                , 0x6D, init_ram_condition             },
	{ "INIT_NV_REG"                       , 0x6E, init_nv_reg                    },
	{ "INIT_MACRO"                        , 0x6F, init_macro                     },
	{ "INIT_DONE"                         , 0x71, init_done                      },
	{ "INIT_RESUME"                       , 0x72, init_resume                    },
	/* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
	{ "INIT_TIME"                         , 0x74, init_time                      },
	{ "INIT_CONDITION"                    , 0x75, init_condition                 },
	{ "INIT_IO_CONDITION"                 , 0x76, init_io_condition              },
	{ "INIT_INDEX_IO"                     , 0x78, init_index_io                  },
	{ "INIT_PLL"                          , 0x79, init_pll                       },
	{ "INIT_ZM_REG"                       , 0x7A, init_zm_reg                    },
	{ "INIT_RAM_RESTRICT_PLL"             , 0x87, init_ram_restrict_pll          },
	{ "INIT_8C"                           , 0x8C, init_8c                        },
	{ "INIT_8D"                           , 0x8D, init_8d                        },
	{ "INIT_GPIO"                         , 0x8E, init_gpio                      },
	{ "INIT_RAM_RESTRICT_ZM_REG_GROUP"    , 0x8F, init_ram_restrict_zm_reg_group },
	{ "INIT_COPY_ZM_REG"                  , 0x90, init_copy_zm_reg               },
	{ "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, init_zm_reg_group_addr_latched },
	{ "INIT_RESERVED"                     , 0x92, init_reserved                  },
	{ "INIT_96"                           , 0x96, init_96                        },
	{ "INIT_97"                           , 0x97, init_97                        },
	{ "INIT_AUXCH"                        , 0x98, init_auxch                     },
	{ "INIT_ZM_AUXCH"                     , 0x99, init_zm_auxch                  },
	{ "INIT_I2C_LONG_IF"                  , 0x9A, init_i2c_long_if               },
	{ NULL                                , 0   , NULL                           }
};
3635
3636#define MAX_TABLE_OPS 1000
3637
static int
parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/*
	 * Parses all commands in an init table.
	 *
	 * We start out executing all commands found in the init table. Some
	 * opcodes may change the status of iexec->execute to SKIP, which will
	 * cause the following opcodes to perform no operation until the value
	 * is changed back to EXECUTE.
	 *
	 * Returns 0 on success (including INIT_DONE termination) and a
	 * negative errno when an unknown opcode is hit.
	 */

	int count = 0, i, ret;
	uint8_t id;

	/* catch NULL script pointers */
	if (offset == 0)
		return 0;

	/*
	 * Loop until INIT_DONE causes us to break out of the loop
	 * (or until offset > bios length just in case... )
	 * (and no more than MAX_TABLE_OPS iterations, just in case... )
	 */
	while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) {
		id = bios->data[offset];

		/* Find matching id in itbl_entry */
		for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
			;

		/* hit the NULL terminator: opcode is not recognised */
		if (!itbl_entry[i].name) {
			NV_ERROR(bios->dev,
				 "0x%04X: Init table command not found: "
				 "0x%02X\n", offset, id);
			return -ENOENT;
		}

		BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
			itbl_entry[i].id, itbl_entry[i].name);

		/*
		 * execute eventual command handler; handlers return the
		 * opcode's total length, 0 for INIT_DONE, or a negative errno
		 */
		ret = (*itbl_entry[i].handler)(bios, offset, iexec);
		if (ret < 0) {
			NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
				 "table opcode: %s %d\n", offset,
				 itbl_entry[i].name, ret);
		}

		/* ret == 0 (done) and ret < 0 (error) both end the table */
		if (ret <= 0)
			break;

		/*
		 * Add the offset of the current command including all data
		 * of that command. The offset will then be pointing on the
		 * next op code.
		 */
		offset += ret;
	}

	if (offset >= bios->length)
		NV_WARN(bios->dev,
			"Offset 0x%04X greater than known bios image length. "
			"Corrupt image?\n", offset);
	if (count >= MAX_TABLE_OPS)
		NV_WARN(bios->dev,
			"More than %d opcodes to a table is unlikely, "
			"is the bios image corrupt?\n", MAX_TABLE_OPS);

	return 0;
}
3709
3710static void
3711parse_init_tables(struct nvbios *bios)
3712{
3713 /* Loops and calls parse_init_table() for each present table. */
3714
3715 int i = 0;
3716 uint16_t table;
3717 struct init_exec iexec = {true, false};
3718
3719 if (bios->old_style_init) {
3720 if (bios->init_script_tbls_ptr)
3721 parse_init_table(bios, bios->init_script_tbls_ptr, &iexec);
3722 if (bios->extra_init_script_tbl_ptr)
3723 parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec);
3724
3725 return;
3726 }
3727
3728 while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) {
3729 NV_INFO(bios->dev,
3730 "Parsing VBIOS init table %d at offset 0x%04X\n",
3731 i / 2, table);
3732 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table);
3733
3734 parse_init_table(bios, table, &iexec);
3735 i += 2;
3736 }
3737}
3738
3739static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk) 69static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
3740{ 70{
3741 int compare_record_len, i = 0; 71 int compare_record_len, i = 0;
@@ -3764,28 +94,24 @@ static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
3764 94
3765static void 95static void
3766run_digital_op_script(struct drm_device *dev, uint16_t scriptptr, 96run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
3767 struct dcb_entry *dcbent, int head, bool dl) 97 struct dcb_output *dcbent, int head, bool dl)
3768{ 98{
3769 struct drm_nouveau_private *dev_priv = dev->dev_private; 99 struct nouveau_drm *drm = nouveau_drm(dev);
3770 struct nvbios *bios = &dev_priv->vbios;
3771 struct init_exec iexec = {true, false};
3772 100
3773 NV_TRACE(dev, "0x%04X: Parsing digital output script table\n", 101 NV_INFO(drm, "0x%04X: Parsing digital output script table\n",
3774 scriptptr); 102 scriptptr);
3775 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44, 103 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, head ? NV_CIO_CRE_44_HEADB :
3776 head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA); 104 NV_CIO_CRE_44_HEADA);
3777 /* note: if dcb entries have been merged, index may be misleading */ 105 nouveau_bios_run_init_table(dev, scriptptr, dcbent, head);
3778 NVWriteVgaCrtc5758(dev, head, 0, dcbent->index);
3779 parse_init_table(bios, scriptptr, &iexec);
3780 106
3781 nv04_dfp_bind_head(dev, dcbent, head, dl); 107 nv04_dfp_bind_head(dev, dcbent, head, dl);
3782} 108}
3783 109
3784static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script) 110static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script)
3785{ 111{
3786 struct drm_nouveau_private *dev_priv = dev->dev_private; 112 struct nouveau_drm *drm = nouveau_drm(dev);
3787 struct nvbios *bios = &dev_priv->vbios; 113 struct nvbios *bios = &drm->vbios;
3788 uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0); 114 uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 1 : 0);
3789 uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]); 115 uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
3790 116
3791 if (!bios->fp.xlated_entry || !sub || !scriptofs) 117 if (!bios->fp.xlated_entry || !sub || !scriptofs)
@@ -3808,7 +134,7 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
3808 return 0; 134 return 0;
3809} 135}
3810 136
3811static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk) 137static int run_lvds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
3812{ 138{
3813 /* 139 /*
3814 * The BIT LVDS table's header has the information to setup the 140 * The BIT LVDS table's header has the information to setup the
@@ -3820,8 +146,8 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
3820 * conf byte. These tables are similar to the TMDS tables, consisting 146 * conf byte. These tables are similar to the TMDS tables, consisting
3821 * of a list of pxclks and script pointers. 147 * of a list of pxclks and script pointers.
3822 */ 148 */
3823 struct drm_nouveau_private *dev_priv = dev->dev_private; 149 struct nouveau_drm *drm = nouveau_drm(dev);
3824 struct nvbios *bios = &dev_priv->vbios; 150 struct nvbios *bios = &drm->vbios;
3825 unsigned int outputset = (dcbent->or == 4) ? 1 : 0; 151 unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
3826 uint16_t scriptptr = 0, clktable; 152 uint16_t scriptptr = 0, clktable;
3827 153
@@ -3866,14 +192,14 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
3866 192
3867 clktable = ROM16(bios->data[clktable]); 193 clktable = ROM16(bios->data[clktable]);
3868 if (!clktable) { 194 if (!clktable) {
3869 NV_ERROR(dev, "Pixel clock comparison table not found\n"); 195 NV_ERROR(drm, "Pixel clock comparison table not found\n");
3870 return -ENOENT; 196 return -ENOENT;
3871 } 197 }
3872 scriptptr = clkcmptable(bios, clktable, pxclk); 198 scriptptr = clkcmptable(bios, clktable, pxclk);
3873 } 199 }
3874 200
3875 if (!scriptptr) { 201 if (!scriptptr) {
3876 NV_ERROR(dev, "LVDS output init script not found\n"); 202 NV_ERROR(drm, "LVDS output init script not found\n");
3877 return -ENOENT; 203 return -ENOENT;
3878 } 204 }
3879 run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link); 205 run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
@@ -3881,7 +207,7 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
3881 return 0; 207 return 0;
3882} 208}
3883 209
3884int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk) 210int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
3885{ 211{
3886 /* 212 /*
3887 * LVDS operations are multiplexed in an effort to present a single API 213 * LVDS operations are multiplexed in an effort to present a single API
@@ -3889,8 +215,9 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
3889 * This acts as the demux 215 * This acts as the demux
3890 */ 216 */
3891 217
3892 struct drm_nouveau_private *dev_priv = dev->dev_private; 218 struct nouveau_drm *drm = nouveau_drm(dev);
3893 struct nvbios *bios = &dev_priv->vbios; 219 struct nouveau_device *device = nv_device(drm->device);
220 struct nvbios *bios = &drm->vbios;
3894 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; 221 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
3895 uint32_t sel_clk_binding, sel_clk; 222 uint32_t sel_clk_binding, sel_clk;
3896 int ret; 223 int ret;
@@ -3909,10 +236,10 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
3909 if (script == LVDS_RESET && bios->fp.power_off_for_reset) 236 if (script == LVDS_RESET && bios->fp.power_off_for_reset)
3910 call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk); 237 call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
3911 238
3912 NV_TRACE(dev, "Calling LVDS script %d:\n", script); 239 NV_INFO(drm, "Calling LVDS script %d:\n", script);
3913 240
3914 /* don't let script change pll->head binding */ 241 /* don't let script change pll->head binding */
3915 sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000; 242 sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
3916 243
3917 if (lvds_ver < 0x30) 244 if (lvds_ver < 0x30)
3918 ret = call_lvds_manufacturer_script(dev, dcbent, head, script); 245 ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
@@ -3924,7 +251,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
3924 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; 251 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
3925 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); 252 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
3926 /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */ 253 /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
3927 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0); 254 nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
3928 255
3929 return ret; 256 return ret;
3930} 257}
@@ -3942,12 +269,13 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
3942 * the maximum number of records that can be held in the table. 269 * the maximum number of records that can be held in the table.
3943 */ 270 */
3944 271
272 struct nouveau_drm *drm = nouveau_drm(dev);
3945 uint8_t lvds_ver, headerlen, recordlen; 273 uint8_t lvds_ver, headerlen, recordlen;
3946 274
3947 memset(lth, 0, sizeof(struct lvdstableheader)); 275 memset(lth, 0, sizeof(struct lvdstableheader));
3948 276
3949 if (bios->fp.lvdsmanufacturerpointer == 0x0) { 277 if (bios->fp.lvdsmanufacturerpointer == 0x0) {
3950 NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n"); 278 NV_ERROR(drm, "Pointer to LVDS manufacturer table invalid\n");
3951 return -EINVAL; 279 return -EINVAL;
3952 } 280 }
3953 281
@@ -3961,7 +289,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
3961 case 0x30: /* NV4x */ 289 case 0x30: /* NV4x */
3962 headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; 290 headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3963 if (headerlen < 0x1f) { 291 if (headerlen < 0x1f) {
3964 NV_ERROR(dev, "LVDS table header not understood\n"); 292 NV_ERROR(drm, "LVDS table header not understood\n");
3965 return -EINVAL; 293 return -EINVAL;
3966 } 294 }
3967 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2]; 295 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
@@ -3969,13 +297,13 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
3969 case 0x40: /* G80/G90 */ 297 case 0x40: /* G80/G90 */
3970 headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1]; 298 headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3971 if (headerlen < 0x7) { 299 if (headerlen < 0x7) {
3972 NV_ERROR(dev, "LVDS table header not understood\n"); 300 NV_ERROR(drm, "LVDS table header not understood\n");
3973 return -EINVAL; 301 return -EINVAL;
3974 } 302 }
3975 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2]; 303 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
3976 break; 304 break;
3977 default: 305 default:
3978 NV_ERROR(dev, 306 NV_ERROR(drm,
3979 "LVDS table revision %d.%d not currently supported\n", 307 "LVDS table revision %d.%d not currently supported\n",
3980 lvds_ver >> 4, lvds_ver & 0xf); 308 lvds_ver >> 4, lvds_ver & 0xf);
3981 return -ENOSYS; 309 return -ENOSYS;
@@ -3991,7 +319,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
3991static int 319static int
3992get_fp_strap(struct drm_device *dev, struct nvbios *bios) 320get_fp_strap(struct drm_device *dev, struct nvbios *bios)
3993{ 321{
3994 struct drm_nouveau_private *dev_priv = dev->dev_private; 322 struct nouveau_device *device = nouveau_dev(dev);
3995 323
3996 /* 324 /*
3997 * The fp strap is normally dictated by the "User Strap" in 325 * The fp strap is normally dictated by the "User Strap" in
@@ -4005,14 +333,15 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
4005 if (bios->major_version < 5 && bios->data[0x48] & 0x4) 333 if (bios->major_version < 5 && bios->data[0x48] & 0x4)
4006 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; 334 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
4007 335
4008 if (dev_priv->card_type >= NV_50) 336 if (device->card_type >= NV_50)
4009 return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; 337 return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
4010 else 338 else
4011 return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; 339 return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
4012} 340}
4013 341
4014static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) 342static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
4015{ 343{
344 struct nouveau_drm *drm = nouveau_drm(dev);
4016 uint8_t *fptable; 345 uint8_t *fptable;
4017 uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex; 346 uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
4018 int ret, ofs, fpstrapping; 347 int ret, ofs, fpstrapping;
@@ -4022,7 +351,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
4022 /* Apple cards don't have the fp table; the laptops use DDC */ 351 /* Apple cards don't have the fp table; the laptops use DDC */
4023 /* The table is also missing on some x86 IGPs */ 352 /* The table is also missing on some x86 IGPs */
4024#ifndef __powerpc__ 353#ifndef __powerpc__
4025 NV_ERROR(dev, "Pointer to flat panel table invalid\n"); 354 NV_ERROR(drm, "Pointer to flat panel table invalid\n");
4026#endif 355#endif
4027 bios->digital_min_front_porch = 0x4b; 356 bios->digital_min_front_porch = 0x4b;
4028 return 0; 357 return 0;
@@ -4061,7 +390,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
4061 ofs = -7; 390 ofs = -7;
4062 break; 391 break;
4063 default: 392 default:
4064 NV_ERROR(dev, 393 NV_ERROR(drm,
4065 "FP table revision %d.%d not currently supported\n", 394 "FP table revision %d.%d not currently supported\n",
4066 fptable_ver >> 4, fptable_ver & 0xf); 395 fptable_ver >> 4, fptable_ver & 0xf);
4067 return -ENOSYS; 396 return -ENOSYS;
@@ -4080,7 +409,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
4080 bios->fp.xlatwidth = lth.recordlen; 409 bios->fp.xlatwidth = lth.recordlen;
4081 } 410 }
4082 if (bios->fp.fpxlatetableptr == 0x0) { 411 if (bios->fp.fpxlatetableptr == 0x0) {
4083 NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n"); 412 NV_ERROR(drm, "Pointer to flat panel xlat table invalid\n");
4084 return -EINVAL; 413 return -EINVAL;
4085 } 414 }
4086 415
@@ -4090,7 +419,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
4090 fpstrapping * bios->fp.xlatwidth]; 419 fpstrapping * bios->fp.xlatwidth];
4091 420
4092 if (fpindex > fpentries) { 421 if (fpindex > fpentries) {
4093 NV_ERROR(dev, "Bad flat panel table index\n"); 422 NV_ERROR(drm, "Bad flat panel table index\n");
4094 return -ENOENT; 423 return -ENOENT;
4095 } 424 }
4096 425
@@ -4109,7 +438,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
4109 bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen + 438 bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
4110 recordlen * fpindex + ofs; 439 recordlen * fpindex + ofs;
4111 440
4112 NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n", 441 NV_INFO(drm, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
4113 ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1, 442 ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
4114 ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1, 443 ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
4115 ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10); 444 ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);
@@ -4119,8 +448,8 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
4119 448
4120bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode) 449bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
4121{ 450{
4122 struct drm_nouveau_private *dev_priv = dev->dev_private; 451 struct nouveau_drm *drm = nouveau_drm(dev);
4123 struct nvbios *bios = &dev_priv->vbios; 452 struct nvbios *bios = &drm->vbios;
4124 uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr]; 453 uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
4125 454
4126 if (!mode) /* just checking whether we can produce a mode */ 455 if (!mode) /* just checking whether we can produce a mode */
@@ -4190,8 +519,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
4190 * requiring tests against the native-mode pixel clock, cannot be done 519 * requiring tests against the native-mode pixel clock, cannot be done
4191 * until later, when this function should be called with non-zero pxclk 520 * until later, when this function should be called with non-zero pxclk
4192 */ 521 */
4193 struct drm_nouveau_private *dev_priv = dev->dev_private; 522 struct nouveau_drm *drm = nouveau_drm(dev);
4194 struct nvbios *bios = &dev_priv->vbios; 523 struct nvbios *bios = &drm->vbios;
4195 int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0; 524 int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
4196 struct lvdstableheader lth; 525 struct lvdstableheader lth;
4197 uint16_t lvdsofs; 526 uint16_t lvdsofs;
@@ -4252,7 +581,7 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
4252 lvdsmanufacturerindex = fpstrapping; 581 lvdsmanufacturerindex = fpstrapping;
4253 break; 582 break;
4254 default: 583 default:
4255 NV_ERROR(dev, "LVDS table revision not currently supported\n"); 584 NV_ERROR(drm, "LVDS table revision not currently supported\n");
4256 return -ENOSYS; 585 return -ENOSYS;
4257 } 586 }
4258 587
@@ -4300,7 +629,7 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
4300 * This function returns true if a particular DCB entry matches. 629 * This function returns true if a particular DCB entry matches.
4301 */ 630 */
4302bool 631bool
4303bios_encoder_match(struct dcb_entry *dcb, u32 hash) 632bios_encoder_match(struct dcb_output *dcb, u32 hash)
4304{ 633{
4305 if ((hash & 0x000000f0) != (dcb->location << 4)) 634 if ((hash & 0x000000f0) != (dcb->location << 4))
4306 return false; 635 return false;
@@ -4310,9 +639,9 @@ bios_encoder_match(struct dcb_entry *dcb, u32 hash)
4310 return false; 639 return false;
4311 640
4312 switch (dcb->type) { 641 switch (dcb->type) {
4313 case OUTPUT_TMDS: 642 case DCB_OUTPUT_TMDS:
4314 case OUTPUT_LVDS: 643 case DCB_OUTPUT_LVDS:
4315 case OUTPUT_DP: 644 case DCB_OUTPUT_DP:
4316 if (hash & 0x00c00000) { 645 if (hash & 0x00c00000) {
4317 if (!(hash & (dcb->sorconf.link << 22))) 646 if (!(hash & (dcb->sorconf.link << 22)))
4318 return false; 647 return false;
@@ -4324,7 +653,7 @@ bios_encoder_match(struct dcb_entry *dcb, u32 hash)
4324 653
4325int 654int
4326nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk, 655nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4327 struct dcb_entry *dcbent, int crtc) 656 struct dcb_output *dcbent, int crtc)
4328{ 657{
4329 /* 658 /*
4330 * The display script table is located by the BIT 'U' table. 659 * The display script table is located by the BIT 'U' table.
@@ -4349,15 +678,15 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4349 * offset + 5 (16 bits): pointer to first output script table 678 * offset + 5 (16 bits): pointer to first output script table
4350 */ 679 */
4351 680
4352 struct drm_nouveau_private *dev_priv = dev->dev_private; 681 struct nouveau_drm *drm = nouveau_drm(dev);
4353 struct nvbios *bios = &dev_priv->vbios; 682 struct nvbios *bios = &drm->vbios;
4354 uint8_t *table = &bios->data[bios->display.script_table_ptr]; 683 uint8_t *table = &bios->data[bios->display.script_table_ptr];
4355 uint8_t *otable = NULL; 684 uint8_t *otable = NULL;
4356 uint16_t script; 685 uint16_t script;
4357 int i; 686 int i;
4358 687
4359 if (!bios->display.script_table_ptr) { 688 if (!bios->display.script_table_ptr) {
4360 NV_ERROR(dev, "No pointer to output script table\n"); 689 NV_ERROR(drm, "No pointer to output script table\n");
4361 return 1; 690 return 1;
4362 } 691 }
4363 692
@@ -4369,7 +698,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4369 return 1; 698 return 1;
4370 699
4371 if (table[0] != 0x20 && table[0] != 0x21) { 700 if (table[0] != 0x20 && table[0] != 0x21) {
4372 NV_ERROR(dev, "Output script table version 0x%02x unknown\n", 701 NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
4373 table[0]); 702 table[0]);
4374 return 1; 703 return 1;
4375 } 704 }
@@ -4404,7 +733,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4404 * script tables is a pointer to the script to execute. 733 * script tables is a pointer to the script to execute.
4405 */ 734 */
4406 735
4407 NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n", 736 NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
4408 dcbent->type, dcbent->location, dcbent->or); 737 dcbent->type, dcbent->location, dcbent->or);
4409 for (i = 0; i < table[3]; i++) { 738 for (i = 0; i < table[3]; i++) {
4410 otable = ROMPTR(dev, table[table[1] + (i * table[2])]); 739 otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
@@ -4413,7 +742,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4413 } 742 }
4414 743
4415 if (!otable) { 744 if (!otable) {
4416 NV_DEBUG_KMS(dev, "failed to match any output table\n"); 745 NV_DEBUG(drm, "failed to match any output table\n");
4417 return 1; 746 return 1;
4418 } 747 }
4419 748
@@ -4425,7 +754,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4425 } 754 }
4426 755
4427 if (i == otable[5]) { 756 if (i == otable[5]) {
4428 NV_ERROR(dev, "Table 0x%04x not found for %d/%d, " 757 NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
4429 "using first\n", 758 "using first\n",
4430 type, dcbent->type, dcbent->or); 759 type, dcbent->type, dcbent->or);
4431 i = 0; 760 i = 0;
@@ -4435,21 +764,21 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4435 if (pclk == 0) { 764 if (pclk == 0) {
4436 script = ROM16(otable[6]); 765 script = ROM16(otable[6]);
4437 if (!script) { 766 if (!script) {
4438 NV_DEBUG_KMS(dev, "output script 0 not found\n"); 767 NV_DEBUG(drm, "output script 0 not found\n");
4439 return 1; 768 return 1;
4440 } 769 }
4441 770
4442 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script); 771 NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
4443 nouveau_bios_run_init_table(dev, script, dcbent, crtc); 772 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
4444 } else 773 } else
4445 if (pclk == -1) { 774 if (pclk == -1) {
4446 script = ROM16(otable[8]); 775 script = ROM16(otable[8]);
4447 if (!script) { 776 if (!script) {
4448 NV_DEBUG_KMS(dev, "output script 1 not found\n"); 777 NV_DEBUG(drm, "output script 1 not found\n");
4449 return 1; 778 return 1;
4450 } 779 }
4451 780
4452 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script); 781 NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
4453 nouveau_bios_run_init_table(dev, script, dcbent, crtc); 782 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
4454 } else 783 } else
4455 if (pclk == -2) { 784 if (pclk == -2) {
@@ -4458,11 +787,11 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4458 else 787 else
4459 script = 0; 788 script = 0;
4460 if (!script) { 789 if (!script) {
4461 NV_DEBUG_KMS(dev, "output script 2 not found\n"); 790 NV_DEBUG(drm, "output script 2 not found\n");
4462 return 1; 791 return 1;
4463 } 792 }
4464 793
4465 NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script); 794 NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
4466 nouveau_bios_run_init_table(dev, script, dcbent, crtc); 795 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
4467 } else 796 } else
4468 if (pclk > 0) { 797 if (pclk > 0) {
@@ -4470,11 +799,11 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4470 if (script) 799 if (script)
4471 script = clkcmptable(bios, script, pclk); 800 script = clkcmptable(bios, script, pclk);
4472 if (!script) { 801 if (!script) {
4473 NV_DEBUG_KMS(dev, "clock script 0 not found\n"); 802 NV_DEBUG(drm, "clock script 0 not found\n");
4474 return 1; 803 return 1;
4475 } 804 }
4476 805
4477 NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script); 806 NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
4478 nouveau_bios_run_init_table(dev, script, dcbent, crtc); 807 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
4479 } else 808 } else
4480 if (pclk < 0) { 809 if (pclk < 0) {
@@ -4482,11 +811,11 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4482 if (script) 811 if (script)
4483 script = clkcmptable(bios, script, -pclk); 812 script = clkcmptable(bios, script, -pclk);
4484 if (!script) { 813 if (!script) {
4485 NV_DEBUG_KMS(dev, "clock script 1 not found\n"); 814 NV_DEBUG(drm, "clock script 1 not found\n");
4486 return 1; 815 return 1;
4487 } 816 }
4488 817
4489 NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script); 818 NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
4490 nouveau_bios_run_init_table(dev, script, dcbent, crtc); 819 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
4491 } 820 }
4492 821
@@ -4494,7 +823,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
4494} 823}
4495 824
4496 825
4497int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk) 826int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
4498{ 827{
4499 /* 828 /*
4500 * the pxclk parameter is in kHz 829 * the pxclk parameter is in kHz
@@ -4505,8 +834,9 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
4505 * ffs(or) == 3, use the second. 834 * ffs(or) == 3, use the second.
4506 */ 835 */
4507 836
4508 struct drm_nouveau_private *dev_priv = dev->dev_private; 837 struct nouveau_drm *drm = nouveau_drm(dev);
4509 struct nvbios *bios = &dev_priv->vbios; 838 struct nouveau_device *device = nv_device(drm->device);
839 struct nvbios *bios = &drm->vbios;
4510 int cv = bios->chip_version; 840 int cv = bios->chip_version;
4511 uint16_t clktable = 0, scriptptr; 841 uint16_t clktable = 0, scriptptr;
4512 uint32_t sel_clk_binding, sel_clk; 842 uint32_t sel_clk_binding, sel_clk;
@@ -4527,19 +857,19 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
4527 } 857 }
4528 858
4529 if (!clktable) { 859 if (!clktable) {
4530 NV_ERROR(dev, "Pixel clock comparison table not found\n"); 860 NV_ERROR(drm, "Pixel clock comparison table not found\n");
4531 return -EINVAL; 861 return -EINVAL;
4532 } 862 }
4533 863
4534 scriptptr = clkcmptable(bios, clktable, pxclk); 864 scriptptr = clkcmptable(bios, clktable, pxclk);
4535 865
4536 if (!scriptptr) { 866 if (!scriptptr) {
4537 NV_ERROR(dev, "TMDS output init script not found\n"); 867 NV_ERROR(drm, "TMDS output init script not found\n");
4538 return -ENOENT; 868 return -ENOENT;
4539 } 869 }
4540 870
4541 /* don't let script change pll->head binding */ 871 /* don't let script change pll->head binding */
4542 sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000; 872 sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
4543 run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000); 873 run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
4544 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; 874 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
4545 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); 875 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
@@ -4547,447 +877,6 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
4547 return 0; 877 return 0;
4548} 878}
4549 879
4550struct pll_mapping {
4551 u8 type;
4552 u32 reg;
4553};
4554
4555static struct pll_mapping nv04_pll_mapping[] = {
4556 { PLL_CORE , NV_PRAMDAC_NVPLL_COEFF },
4557 { PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF },
4558 { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
4559 { PLL_VPLL1 , NV_RAMDAC_VPLL2 },
4560 {}
4561};
4562
4563static struct pll_mapping nv40_pll_mapping[] = {
4564 { PLL_CORE , 0x004000 },
4565 { PLL_MEMORY, 0x004020 },
4566 { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
4567 { PLL_VPLL1 , NV_RAMDAC_VPLL2 },
4568 {}
4569};
4570
4571static struct pll_mapping nv50_pll_mapping[] = {
4572 { PLL_CORE , 0x004028 },
4573 { PLL_SHADER, 0x004020 },
4574 { PLL_UNK03 , 0x004000 },
4575 { PLL_MEMORY, 0x004008 },
4576 { PLL_UNK40 , 0x00e810 },
4577 { PLL_UNK41 , 0x00e818 },
4578 { PLL_UNK42 , 0x00e824 },
4579 { PLL_VPLL0 , 0x614100 },
4580 { PLL_VPLL1 , 0x614900 },
4581 {}
4582};
4583
4584static struct pll_mapping nv84_pll_mapping[] = {
4585 { PLL_CORE , 0x004028 },
4586 { PLL_SHADER, 0x004020 },
4587 { PLL_MEMORY, 0x004008 },
4588 { PLL_VDEC , 0x004030 },
4589 { PLL_UNK41 , 0x00e818 },
4590 { PLL_VPLL0 , 0x614100 },
4591 { PLL_VPLL1 , 0x614900 },
4592 {}
4593};
4594
4595u32
4596get_pll_register(struct drm_device *dev, enum pll_types type)
4597{
4598 struct drm_nouveau_private *dev_priv = dev->dev_private;
4599 struct nvbios *bios = &dev_priv->vbios;
4600 struct pll_mapping *map;
4601 int i;
4602
4603 if (dev_priv->card_type < NV_40)
4604 map = nv04_pll_mapping;
4605 else
4606 if (dev_priv->card_type < NV_50)
4607 map = nv40_pll_mapping;
4608 else {
4609 u8 *plim = &bios->data[bios->pll_limit_tbl_ptr];
4610
4611 if (plim[0] >= 0x30) {
4612 u8 *entry = plim + plim[1];
4613 for (i = 0; i < plim[3]; i++, entry += plim[2]) {
4614 if (entry[0] == type)
4615 return ROM32(entry[3]);
4616 }
4617
4618 return 0;
4619 }
4620
4621 if (dev_priv->chipset == 0x50)
4622 map = nv50_pll_mapping;
4623 else
4624 map = nv84_pll_mapping;
4625 }
4626
4627 while (map->reg) {
4628 if (map->type == type)
4629 return map->reg;
4630 map++;
4631 }
4632
4633 return 0;
4634}
4635
4636int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
4637{
4638 /*
4639 * PLL limits table
4640 *
4641 * Version 0x10: NV30, NV31
4642 * One byte header (version), one record of 24 bytes
4643 * Version 0x11: NV36 - Not implemented
4644 * Seems to have same record style as 0x10, but 3 records rather than 1
4645 * Version 0x20: Found on Geforce 6 cards
4646 * Trivial 4 byte BIT header. 31 (0x1f) byte record length
4647 * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
4648 * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
4649 * length in general, some (integrated) have an extra configuration byte
4650 * Version 0x30: Found on Geforce 8, separates the register mapping
4651 * from the limits tables.
4652 */
4653
4654 struct drm_nouveau_private *dev_priv = dev->dev_private;
4655 struct nvbios *bios = &dev_priv->vbios;
4656 int cv = bios->chip_version, pllindex = 0;
4657 uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
4658 uint32_t crystal_strap_mask, crystal_straps;
4659
4660 if (!bios->pll_limit_tbl_ptr) {
4661 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
4662 cv >= 0x40) {
4663 NV_ERROR(dev, "Pointer to PLL limits table invalid\n");
4664 return -EINVAL;
4665 }
4666 } else
4667 pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];
4668
4669 crystal_strap_mask = 1 << 6;
4670 /* open coded dev->twoHeads test */
4671 if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20)
4672 crystal_strap_mask |= 1 << 22;
4673 crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) &
4674 crystal_strap_mask;
4675
4676 switch (pll_lim_ver) {
4677 /*
4678 * We use version 0 to indicate a pre limit table bios (single stage
4679 * pll) and load the hard coded limits instead.
4680 */
4681 case 0:
4682 break;
4683 case 0x10:
4684 case 0x11:
4685 /*
4686 * Strictly v0x11 has 3 entries, but the last two don't seem
4687 * to get used.
4688 */
4689 headerlen = 1;
4690 recordlen = 0x18;
4691 entries = 1;
4692 pllindex = 0;
4693 break;
4694 case 0x20:
4695 case 0x21:
4696 case 0x30:
4697 case 0x40:
4698 headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
4699 recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
4700 entries = bios->data[bios->pll_limit_tbl_ptr + 3];
4701 break;
4702 default:
4703 NV_ERROR(dev, "PLL limits table revision 0x%X not currently "
4704 "supported\n", pll_lim_ver);
4705 return -ENOSYS;
4706 }
4707
4708 /* initialize all members to zero */
4709 memset(pll_lim, 0, sizeof(struct pll_lims));
4710
4711 /* if we were passed a type rather than a register, figure
4712 * out the register and store it
4713 */
4714 if (limit_match > PLL_MAX)
4715 pll_lim->reg = limit_match;
4716 else {
4717 pll_lim->reg = get_pll_register(dev, limit_match);
4718 if (!pll_lim->reg)
4719 return -ENOENT;
4720 }
4721
4722 if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
4723 uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
4724
4725 pll_lim->vco1.minfreq = ROM32(pll_rec[0]);
4726 pll_lim->vco1.maxfreq = ROM32(pll_rec[4]);
4727 pll_lim->vco2.minfreq = ROM32(pll_rec[8]);
4728 pll_lim->vco2.maxfreq = ROM32(pll_rec[12]);
4729 pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]);
4730 pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]);
4731 pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;
4732
4733 /* these values taken from nv30/31/36 */
4734 pll_lim->vco1.min_n = 0x1;
4735 if (cv == 0x36)
4736 pll_lim->vco1.min_n = 0x5;
4737 pll_lim->vco1.max_n = 0xff;
4738 pll_lim->vco1.min_m = 0x1;
4739 pll_lim->vco1.max_m = 0xd;
4740 pll_lim->vco2.min_n = 0x4;
4741 /*
4742 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
4743 * table version (apart from nv35)), N2 is compared to
4744 * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
4745 * save a comparison
4746 */
4747 pll_lim->vco2.max_n = 0x28;
4748 if (cv == 0x30 || cv == 0x35)
4749 /* only 5 bits available for N2 on nv30/35 */
4750 pll_lim->vco2.max_n = 0x1f;
4751 pll_lim->vco2.min_m = 0x1;
4752 pll_lim->vco2.max_m = 0x4;
4753 pll_lim->max_log2p = 0x7;
4754 pll_lim->max_usable_log2p = 0x6;
4755 } else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
4756 uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
4757 uint8_t *pll_rec;
4758 int i;
4759
4760 /*
4761 * First entry is default match, if nothing better. warn if
4762 * reg field nonzero
4763 */
4764 if (ROM32(bios->data[plloffs]))
4765 NV_WARN(dev, "Default PLL limit entry has non-zero "
4766 "register field\n");
4767
4768 for (i = 1; i < entries; i++)
4769 if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) {
4770 pllindex = i;
4771 break;
4772 }
4773
4774 if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) {
4775 NV_ERROR(dev, "Register 0x%08x not found in PLL "
4776 "limits table", pll_lim->reg);
4777 return -ENOENT;
4778 }
4779
4780 pll_rec = &bios->data[plloffs + recordlen * pllindex];
4781
4782 BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
4783 pllindex ? pll_lim->reg : 0);
4784
4785 /*
4786 * Frequencies are stored in tables in MHz, kHz are more
4787 * useful, so we convert.
4788 */
4789
4790 /* What output frequencies can each VCO generate? */
4791 pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000;
4792 pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000;
4793 pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000;
4794 pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000;
4795
4796 /* What input frequencies they accept (past the m-divider)? */
4797 pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000;
4798 pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000;
4799 pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000;
4800 pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000;
4801
4802 /* What values are accepted as multiplier and divider? */
4803 pll_lim->vco1.min_n = pll_rec[20];
4804 pll_lim->vco1.max_n = pll_rec[21];
4805 pll_lim->vco1.min_m = pll_rec[22];
4806 pll_lim->vco1.max_m = pll_rec[23];
4807 pll_lim->vco2.min_n = pll_rec[24];
4808 pll_lim->vco2.max_n = pll_rec[25];
4809 pll_lim->vco2.min_m = pll_rec[26];
4810 pll_lim->vco2.max_m = pll_rec[27];
4811
4812 pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29];
4813 if (pll_lim->max_log2p > 0x7)
4814 /* pll decoding in nv_hw.c assumes never > 7 */
4815 NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n",
4816 pll_lim->max_log2p);
4817 if (cv < 0x60)
4818 pll_lim->max_usable_log2p = 0x6;
4819 pll_lim->log2p_bias = pll_rec[30];
4820
4821 if (recordlen > 0x22)
4822 pll_lim->refclk = ROM32(pll_rec[31]);
4823
4824 if (recordlen > 0x23 && pll_rec[35])
4825 NV_WARN(dev,
4826 "Bits set in PLL configuration byte (%x)\n",
4827 pll_rec[35]);
4828
4829 /* C51 special not seen elsewhere */
4830 if (cv == 0x51 && !pll_lim->refclk) {
4831 uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
4832
4833 if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) ||
4834 (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) {
4835 if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
4836 pll_lim->refclk = 200000;
4837 else
4838 pll_lim->refclk = 25000;
4839 }
4840 }
4841 } else if (pll_lim_ver == 0x30) { /* ver 0x30 */
4842 uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
4843 uint8_t *record = NULL;
4844 int i;
4845
4846 BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
4847 pll_lim->reg);
4848
4849 for (i = 0; i < entries; i++, entry += recordlen) {
4850 if (ROM32(entry[3]) == pll_lim->reg) {
4851 record = &bios->data[ROM16(entry[1])];
4852 break;
4853 }
4854 }
4855
4856 if (!record) {
4857 NV_ERROR(dev, "Register 0x%08x not found in PLL "
4858 "limits table", pll_lim->reg);
4859 return -ENOENT;
4860 }
4861
4862 pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
4863 pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
4864 pll_lim->vco2.minfreq = ROM16(record[4]) * 1000;
4865 pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000;
4866 pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000;
4867 pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000;
4868 pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000;
4869 pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000;
4870 pll_lim->vco1.min_n = record[16];
4871 pll_lim->vco1.max_n = record[17];
4872 pll_lim->vco1.min_m = record[18];
4873 pll_lim->vco1.max_m = record[19];
4874 pll_lim->vco2.min_n = record[20];
4875 pll_lim->vco2.max_n = record[21];
4876 pll_lim->vco2.min_m = record[22];
4877 pll_lim->vco2.max_m = record[23];
4878 pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25];
4879 pll_lim->log2p_bias = record[27];
4880 pll_lim->refclk = ROM32(record[28]);
4881 } else if (pll_lim_ver) { /* ver 0x40 */
4882 uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
4883 uint8_t *record = NULL;
4884 int i;
4885
4886 BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
4887 pll_lim->reg);
4888
4889 for (i = 0; i < entries; i++, entry += recordlen) {
4890 if (ROM32(entry[3]) == pll_lim->reg) {
4891 record = &bios->data[ROM16(entry[1])];
4892 break;
4893 }
4894 }
4895
4896 if (!record) {
4897 NV_ERROR(dev, "Register 0x%08x not found in PLL "
4898 "limits table", pll_lim->reg);
4899 return -ENOENT;
4900 }
4901
4902 pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
4903 pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
4904 pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000;
4905 pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000;
4906 pll_lim->vco1.min_m = record[8];
4907 pll_lim->vco1.max_m = record[9];
4908 pll_lim->vco1.min_n = record[10];
4909 pll_lim->vco1.max_n = record[11];
4910 pll_lim->min_p = record[12];
4911 pll_lim->max_p = record[13];
4912 pll_lim->refclk = ROM16(entry[9]) * 1000;
4913 }
4914
4915 /*
4916 * By now any valid limit table ought to have set a max frequency for
4917 * vco1, so if it's zero it's either a pre limit table bios, or one
4918 * with an empty limit table (seen on nv18)
4919 */
4920 if (!pll_lim->vco1.maxfreq) {
4921 pll_lim->vco1.minfreq = bios->fminvco;
4922 pll_lim->vco1.maxfreq = bios->fmaxvco;
4923 pll_lim->vco1.min_inputfreq = 0;
4924 pll_lim->vco1.max_inputfreq = INT_MAX;
4925 pll_lim->vco1.min_n = 0x1;
4926 pll_lim->vco1.max_n = 0xff;
4927 pll_lim->vco1.min_m = 0x1;
4928 if (crystal_straps == 0) {
4929 /* nv05 does this, nv11 doesn't, nv10 unknown */
4930 if (cv < 0x11)
4931 pll_lim->vco1.min_m = 0x7;
4932 pll_lim->vco1.max_m = 0xd;
4933 } else {
4934 if (cv < 0x11)
4935 pll_lim->vco1.min_m = 0x8;
4936 pll_lim->vco1.max_m = 0xe;
4937 }
4938 if (cv < 0x17 || cv == 0x1a || cv == 0x20)
4939 pll_lim->max_log2p = 4;
4940 else
4941 pll_lim->max_log2p = 5;
4942 pll_lim->max_usable_log2p = pll_lim->max_log2p;
4943 }
4944
4945 if (!pll_lim->refclk)
4946 switch (crystal_straps) {
4947 case 0:
4948 pll_lim->refclk = 13500;
4949 break;
4950 case (1 << 6):
4951 pll_lim->refclk = 14318;
4952 break;
4953 case (1 << 22):
4954 pll_lim->refclk = 27000;
4955 break;
4956 case (1 << 22 | 1 << 6):
4957 pll_lim->refclk = 25000;
4958 break;
4959 }
4960
4961 NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
4962 NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
4963 NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
4964 NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
4965 NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
4966 NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
4967 NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
4968 NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
4969 if (pll_lim->vco2.maxfreq) {
4970 NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
4971 NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
4972 NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
4973 NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
4974 NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
4975 NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
4976 NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
4977 NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
4978 }
4979 if (!pll_lim->max_p) {
4980 NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
4981 NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
4982 } else {
4983 NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
4984 NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
4985 }
4986 NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
4987
4988 return 0;
4989}
4990
4991static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset) 880static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
4992{ 881{
4993 /* 882 /*
@@ -4996,10 +885,11 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint
4996 * offset + 2 (8 bits): Chip version 885 * offset + 2 (8 bits): Chip version
4997 * offset + 3 (8 bits): Major version 886 * offset + 3 (8 bits): Major version
4998 */ 887 */
888 struct nouveau_drm *drm = nouveau_drm(dev);
4999 889
5000 bios->major_version = bios->data[offset + 3]; 890 bios->major_version = bios->data[offset + 3];
5001 bios->chip_version = bios->data[offset + 2]; 891 bios->chip_version = bios->data[offset + 2];
5002 NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n", 892 NV_INFO(drm, "Bios version %02x.%02x.%02x.%02x\n",
5003 bios->data[offset + 3], bios->data[offset + 2], 893 bios->data[offset + 3], bios->data[offset + 2],
5004 bios->data[offset + 1], bios->data[offset]); 894 bios->data[offset + 1], bios->data[offset]);
5005} 895}
@@ -5035,25 +925,26 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
5035 * offset + 0 (16 bits): loadval table pointer 925 * offset + 0 (16 bits): loadval table pointer
5036 */ 926 */
5037 927
928 struct nouveau_drm *drm = nouveau_drm(dev);
5038 uint16_t load_table_ptr; 929 uint16_t load_table_ptr;
5039 uint8_t version, headerlen, entrylen, num_entries; 930 uint8_t version, headerlen, entrylen, num_entries;
5040 931
5041 if (bitentry->length != 3) { 932 if (bitentry->length != 3) {
5042 NV_ERROR(dev, "Do not understand BIT A table\n"); 933 NV_ERROR(drm, "Do not understand BIT A table\n");
5043 return -EINVAL; 934 return -EINVAL;
5044 } 935 }
5045 936
5046 load_table_ptr = ROM16(bios->data[bitentry->offset]); 937 load_table_ptr = ROM16(bios->data[bitentry->offset]);
5047 938
5048 if (load_table_ptr == 0x0) { 939 if (load_table_ptr == 0x0) {
5049 NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n"); 940 NV_DEBUG(drm, "Pointer to BIT loadval table invalid\n");
5050 return -EINVAL; 941 return -EINVAL;
5051 } 942 }
5052 943
5053 version = bios->data[load_table_ptr]; 944 version = bios->data[load_table_ptr];
5054 945
5055 if (version != 0x10) { 946 if (version != 0x10) {
5056 NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n", 947 NV_ERROR(drm, "BIT loadval table version %d.%d not supported\n",
5057 version >> 4, version & 0xF); 948 version >> 4, version & 0xF);
5058 return -ENOSYS; 949 return -ENOSYS;
5059 } 950 }
@@ -5063,7 +954,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
5063 num_entries = bios->data[load_table_ptr + 3]; 954 num_entries = bios->data[load_table_ptr + 3];
5064 955
5065 if (headerlen != 4 || entrylen != 4 || num_entries != 2) { 956 if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
5066 NV_ERROR(dev, "Do not understand BIT loadval table\n"); 957 NV_ERROR(drm, "Do not understand BIT loadval table\n");
5067 return -EINVAL; 958 return -EINVAL;
5068 } 959 }
5069 960
@@ -5080,9 +971,10 @@ static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
5080 * 971 *
5081 * There's more in here, but that's unknown. 972 * There's more in here, but that's unknown.
5082 */ 973 */
974 struct nouveau_drm *drm = nouveau_drm(dev);
5083 975
5084 if (bitentry->length < 10) { 976 if (bitentry->length < 10) {
5085 NV_ERROR(dev, "Do not understand BIT C table\n"); 977 NV_ERROR(drm, "Do not understand BIT C table\n");
5086 return -EINVAL; 978 return -EINVAL;
5087 } 979 }
5088 980
@@ -5101,9 +993,10 @@ static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bi
5101 * records beginning with a freq. 993 * records beginning with a freq.
5102 * offset + 2 (16 bits): mode table pointer 994 * offset + 2 (16 bits): mode table pointer
5103 */ 995 */
996 struct nouveau_drm *drm = nouveau_drm(dev);
5104 997
5105 if (bitentry->length != 4) { 998 if (bitentry->length != 4) {
5106 NV_ERROR(dev, "Do not understand BIT display table\n"); 999 NV_ERROR(drm, "Do not understand BIT display table\n");
5107 return -EINVAL; 1000 return -EINVAL;
5108 } 1001 }
5109 1002
@@ -5119,9 +1012,10 @@ static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios,
5119 * 1012 *
5120 * See parse_script_table_pointers for layout 1013 * See parse_script_table_pointers for layout
5121 */ 1014 */
1015 struct nouveau_drm *drm = nouveau_drm(dev);
5122 1016
5123 if (bitentry->length < 14) { 1017 if (bitentry->length < 14) {
5124 NV_ERROR(dev, "Do not understand init table\n"); 1018 NV_ERROR(drm, "Do not understand init table\n");
5125 return -EINVAL; 1019 return -EINVAL;
5126 } 1020 }
5127 1021
@@ -5148,11 +1042,12 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
5148 * There's other things in the table, purpose unknown 1042 * There's other things in the table, purpose unknown
5149 */ 1043 */
5150 1044
1045 struct nouveau_drm *drm = nouveau_drm(dev);
5151 uint16_t daccmpoffset; 1046 uint16_t daccmpoffset;
5152 uint8_t dacver, dacheaderlen; 1047 uint8_t dacver, dacheaderlen;
5153 1048
5154 if (bitentry->length < 6) { 1049 if (bitentry->length < 6) {
5155 NV_ERROR(dev, "BIT i table too short for needed information\n"); 1050 NV_ERROR(drm, "BIT i table too short for needed information\n");
5156 return -EINVAL; 1051 return -EINVAL;
5157 } 1052 }
5158 1053
@@ -5166,7 +1061,7 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
5166 bios->is_mobile = bios->feature_byte & FEATURE_MOBILE; 1061 bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
5167 1062
5168 if (bitentry->length < 15) { 1063 if (bitentry->length < 15) {
5169 NV_WARN(dev, "BIT i table not long enough for DAC load " 1064 NV_WARN(drm, "BIT i table not long enough for DAC load "
5170 "detection comparison table\n"); 1065 "detection comparison table\n");
5171 return -EINVAL; 1066 return -EINVAL;
5172 } 1067 }
@@ -5187,7 +1082,7 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
5187 dacheaderlen = bios->data[daccmpoffset + 1]; 1082 dacheaderlen = bios->data[daccmpoffset + 1];
5188 1083
5189 if (dacver != 0x00 && dacver != 0x10) { 1084 if (dacver != 0x00 && dacver != 0x10) {
5190 NV_WARN(dev, "DAC load detection comparison table version " 1085 NV_WARN(drm, "DAC load detection comparison table version "
5191 "%d.%d not known\n", dacver >> 4, dacver & 0xf); 1086 "%d.%d not known\n", dacver >> 4, dacver & 0xf);
5192 return -ENOSYS; 1087 return -ENOSYS;
5193 } 1088 }
@@ -5207,8 +1102,10 @@ static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
5207 * offset + 0 (16 bits): LVDS strap xlate table pointer 1102 * offset + 0 (16 bits): LVDS strap xlate table pointer
5208 */ 1103 */
5209 1104
1105 struct nouveau_drm *drm = nouveau_drm(dev);
1106
5210 if (bitentry->length != 2) { 1107 if (bitentry->length != 2) {
5211 NV_ERROR(dev, "Do not understand BIT LVDS table\n"); 1108 NV_ERROR(drm, "Do not understand BIT LVDS table\n");
5212 return -EINVAL; 1109 return -EINVAL;
5213 } 1110 }
5214 1111
@@ -5278,20 +1175,21 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
5278 * "or" from the DCB. 1175 * "or" from the DCB.
5279 */ 1176 */
5280 1177
1178 struct nouveau_drm *drm = nouveau_drm(dev);
5281 uint16_t tmdstableptr, script1, script2; 1179 uint16_t tmdstableptr, script1, script2;
5282 1180
5283 if (bitentry->length != 2) { 1181 if (bitentry->length != 2) {
5284 NV_ERROR(dev, "Do not understand BIT TMDS table\n"); 1182 NV_ERROR(drm, "Do not understand BIT TMDS table\n");
5285 return -EINVAL; 1183 return -EINVAL;
5286 } 1184 }
5287 1185
5288 tmdstableptr = ROM16(bios->data[bitentry->offset]); 1186 tmdstableptr = ROM16(bios->data[bitentry->offset]);
5289 if (!tmdstableptr) { 1187 if (!tmdstableptr) {
5290 NV_ERROR(dev, "Pointer to TMDS table invalid\n"); 1188 NV_ERROR(drm, "Pointer to TMDS table invalid\n");
5291 return -EINVAL; 1189 return -EINVAL;
5292 } 1190 }
5293 1191
5294 NV_INFO(dev, "TMDS table version %d.%d\n", 1192 NV_INFO(drm, "TMDS table version %d.%d\n",
5295 bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf); 1193 bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
5296 1194
5297 /* nv50+ has v2.0, but we don't parse it atm */ 1195 /* nv50+ has v2.0, but we don't parse it atm */
@@ -5305,7 +1203,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
5305 script1 = ROM16(bios->data[tmdstableptr + 7]); 1203 script1 = ROM16(bios->data[tmdstableptr + 7]);
5306 script2 = ROM16(bios->data[tmdstableptr + 9]); 1204 script2 = ROM16(bios->data[tmdstableptr + 9]);
5307 if (bios->data[script1] != 'q' || bios->data[script2] != 'q') 1205 if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
5308 NV_WARN(dev, "TMDS table script pointers not stubbed\n"); 1206 NV_WARN(drm, "TMDS table script pointers not stubbed\n");
5309 1207
5310 bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]); 1208 bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
5311 bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]); 1209 bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
@@ -5325,10 +1223,11 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
5325 * offset + 0 (16 bits): output script table pointer 1223 * offset + 0 (16 bits): output script table pointer
5326 */ 1224 */
5327 1225
1226 struct nouveau_drm *drm = nouveau_drm(dev);
5328 uint16_t outputscripttableptr; 1227 uint16_t outputscripttableptr;
5329 1228
5330 if (bitentry->length != 3) { 1229 if (bitentry->length != 3) {
5331 NV_ERROR(dev, "Do not understand BIT U table\n"); 1230 NV_ERROR(drm, "Do not understand BIT U table\n");
5332 return -EINVAL; 1231 return -EINVAL;
5333 } 1232 }
5334 1233
@@ -5347,8 +1246,8 @@ struct bit_table {
5347int 1246int
5348bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit) 1247bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
5349{ 1248{
5350 struct drm_nouveau_private *dev_priv = dev->dev_private; 1249 struct nouveau_drm *drm = nouveau_drm(dev);
5351 struct nvbios *bios = &dev_priv->vbios; 1250 struct nvbios *bios = &drm->vbios;
5352 u8 entries, *entry; 1251 u8 entries, *entry;
5353 1252
5354 if (bios->type != NVBIOS_BIT) 1253 if (bios->type != NVBIOS_BIT)
@@ -5377,12 +1276,13 @@ parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
5377 struct bit_table *table) 1276 struct bit_table *table)
5378{ 1277{
5379 struct drm_device *dev = bios->dev; 1278 struct drm_device *dev = bios->dev;
1279 struct nouveau_drm *drm = nouveau_drm(dev);
5380 struct bit_entry bitentry; 1280 struct bit_entry bitentry;
5381 1281
5382 if (bit_table(dev, table->id, &bitentry) == 0) 1282 if (bit_table(dev, table->id, &bitentry) == 0)
5383 return table->parse_fn(dev, bios, &bitentry); 1283 return table->parse_fn(dev, bios, &bitentry);
5384 1284
5385 NV_INFO(dev, "BIT table '%c' not found\n", table->id); 1285 NV_INFO(drm, "BIT table '%c' not found\n", table->id);
5386 return -ENOSYS; 1286 return -ENOSYS;
5387} 1287}
5388 1288
@@ -5462,6 +1362,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
5462 * offset + 156: minimum pixel clock for LVDS dual link 1362 * offset + 156: minimum pixel clock for LVDS dual link
5463 */ 1363 */
5464 1364
1365 struct nouveau_drm *drm = nouveau_drm(dev);
5465 uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor; 1366 uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
5466 uint16_t bmplength; 1367 uint16_t bmplength;
5467 uint16_t legacy_scripts_offset, legacy_i2c_offset; 1368 uint16_t legacy_scripts_offset, legacy_i2c_offset;
@@ -5475,7 +1376,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
5475 bmp_version_major = bmp[5]; 1376 bmp_version_major = bmp[5];
5476 bmp_version_minor = bmp[6]; 1377 bmp_version_minor = bmp[6];
5477 1378
5478 NV_TRACE(dev, "BMP version %d.%d\n", 1379 NV_INFO(drm, "BMP version %d.%d\n",
5479 bmp_version_major, bmp_version_minor); 1380 bmp_version_major, bmp_version_minor);
5480 1381
5481 /* 1382 /*
@@ -5491,7 +1392,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
5491 * happened instead. 1392 * happened instead.
5492 */ 1393 */
5493 if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) { 1394 if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
5494 NV_ERROR(dev, "You have an unsupported BMP version. " 1395 NV_ERROR(drm, "You have an unsupported BMP version. "
5495 "Please send in your bios\n"); 1396 "Please send in your bios\n");
5496 return -ENOSYS; 1397 return -ENOSYS;
5497 } 1398 }
@@ -5540,7 +1441,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
5540 1441
5541 /* checksum */ 1442 /* checksum */
5542 if (nv_cksum(bmp, 8)) { 1443 if (nv_cksum(bmp, 8)) {
5543 NV_ERROR(dev, "Bad BMP checksum\n"); 1444 NV_ERROR(drm, "Bad BMP checksum\n");
5544 return -EINVAL; 1445 return -EINVAL;
5545 } 1446 }
5546 1447
@@ -5625,20 +1526,20 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
5625} 1526}
5626 1527
5627void * 1528void *
5628dcb_table(struct drm_device *dev) 1529olddcb_table(struct drm_device *dev)
5629{ 1530{
5630 struct drm_nouveau_private *dev_priv = dev->dev_private; 1531 struct nouveau_drm *drm = nouveau_drm(dev);
5631 u8 *dcb = NULL; 1532 u8 *dcb = NULL;
5632 1533
5633 if (dev_priv->card_type > NV_04) 1534 if (nv_device(drm->device)->card_type > NV_04)
5634 dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]); 1535 dcb = ROMPTR(dev, drm->vbios.data[0x36]);
5635 if (!dcb) { 1536 if (!dcb) {
5636 NV_WARNONCE(dev, "No DCB data found in VBIOS\n"); 1537 NV_WARN(drm, "No DCB data found in VBIOS\n");
5637 return NULL; 1538 return NULL;
5638 } 1539 }
5639 1540
5640 if (dcb[0] >= 0x41) { 1541 if (dcb[0] >= 0x41) {
5641 NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]); 1542 NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]);
5642 return NULL; 1543 return NULL;
5643 } else 1544 } else
5644 if (dcb[0] >= 0x30) { 1545 if (dcb[0] >= 0x30) {
@@ -5670,18 +1571,18 @@ dcb_table(struct drm_device *dev)
5670 * 1571 *
5671 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful 1572 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
5672 */ 1573 */
5673 NV_WARNONCE(dev, "No useful DCB data in VBIOS\n"); 1574 NV_WARN(drm, "No useful DCB data in VBIOS\n");
5674 return NULL; 1575 return NULL;
5675 } 1576 }
5676 1577
5677 NV_WARNONCE(dev, "DCB header validation failed\n"); 1578 NV_WARN(drm, "DCB header validation failed\n");
5678 return NULL; 1579 return NULL;
5679} 1580}
5680 1581
5681void * 1582void *
5682dcb_outp(struct drm_device *dev, u8 idx) 1583olddcb_outp(struct drm_device *dev, u8 idx)
5683{ 1584{
5684 u8 *dcb = dcb_table(dev); 1585 u8 *dcb = olddcb_table(dev);
5685 if (dcb && dcb[0] >= 0x30) { 1586 if (dcb && dcb[0] >= 0x30) {
5686 if (idx < dcb[2]) 1587 if (idx < dcb[2])
5687 return dcb + dcb[1] + (idx * dcb[3]); 1588 return dcb + dcb[1] + (idx * dcb[3]);
@@ -5703,20 +1604,20 @@ dcb_outp(struct drm_device *dev, u8 idx)
5703} 1604}
5704 1605
5705int 1606int
5706dcb_outp_foreach(struct drm_device *dev, void *data, 1607olddcb_outp_foreach(struct drm_device *dev, void *data,
5707 int (*exec)(struct drm_device *, void *, int idx, u8 *outp)) 1608 int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
5708{ 1609{
5709 int ret, idx = -1; 1610 int ret, idx = -1;
5710 u8 *outp = NULL; 1611 u8 *outp = NULL;
5711 while ((outp = dcb_outp(dev, ++idx))) { 1612 while ((outp = olddcb_outp(dev, ++idx))) {
5712 if (ROM32(outp[0]) == 0x00000000) 1613 if (ROM32(outp[0]) == 0x00000000)
5713 break; /* seen on an NV11 with DCB v1.5 */ 1614 break; /* seen on an NV11 with DCB v1.5 */
5714 if (ROM32(outp[0]) == 0xffffffff) 1615 if (ROM32(outp[0]) == 0xffffffff)
5715 break; /* seen on an NV17 with DCB v2.0 */ 1616 break; /* seen on an NV17 with DCB v2.0 */
5716 1617
5717 if ((outp[0] & 0x0f) == OUTPUT_UNUSED) 1618 if ((outp[0] & 0x0f) == DCB_OUTPUT_UNUSED)
5718 continue; 1619 continue;
5719 if ((outp[0] & 0x0f) == OUTPUT_EOL) 1620 if ((outp[0] & 0x0f) == DCB_OUTPUT_EOL)
5720 break; 1621 break;
5721 1622
5722 ret = exec(dev, data, idx, outp); 1623 ret = exec(dev, data, idx, outp);
@@ -5728,9 +1629,9 @@ dcb_outp_foreach(struct drm_device *dev, void *data,
5728} 1629}
5729 1630
5730u8 * 1631u8 *
5731dcb_conntab(struct drm_device *dev) 1632olddcb_conntab(struct drm_device *dev)
5732{ 1633{
5733 u8 *dcb = dcb_table(dev); 1634 u8 *dcb = olddcb_table(dev);
5734 if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) { 1635 if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
5735 u8 *conntab = ROMPTR(dev, dcb[0x14]); 1636 u8 *conntab = ROMPTR(dev, dcb[0x14]);
5736 if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40) 1637 if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
@@ -5740,19 +1641,19 @@ dcb_conntab(struct drm_device *dev)
5740} 1641}
5741 1642
5742u8 * 1643u8 *
5743dcb_conn(struct drm_device *dev, u8 idx) 1644olddcb_conn(struct drm_device *dev, u8 idx)
5744{ 1645{
5745 u8 *conntab = dcb_conntab(dev); 1646 u8 *conntab = olddcb_conntab(dev);
5746 if (conntab && idx < conntab[2]) 1647 if (conntab && idx < conntab[2])
5747 return conntab + conntab[1] + (idx * conntab[3]); 1648 return conntab + conntab[1] + (idx * conntab[3]);
5748 return NULL; 1649 return NULL;
5749} 1650}
5750 1651
5751static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb) 1652static struct dcb_output *new_dcb_entry(struct dcb_table *dcb)
5752{ 1653{
5753 struct dcb_entry *entry = &dcb->entry[dcb->entries]; 1654 struct dcb_output *entry = &dcb->entry[dcb->entries];
5754 1655
5755 memset(entry, 0, sizeof(struct dcb_entry)); 1656 memset(entry, 0, sizeof(struct dcb_output));
5756 entry->index = dcb->entries++; 1657 entry->index = dcb->entries++;
5757 1658
5758 return entry; 1659 return entry;
@@ -5761,20 +1662,22 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
5761static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c, 1662static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
5762 int heads, int or) 1663 int heads, int or)
5763{ 1664{
5764 struct dcb_entry *entry = new_dcb_entry(dcb); 1665 struct dcb_output *entry = new_dcb_entry(dcb);
5765 1666
5766 entry->type = type; 1667 entry->type = type;
5767 entry->i2c_index = i2c; 1668 entry->i2c_index = i2c;
5768 entry->heads = heads; 1669 entry->heads = heads;
5769 if (type != OUTPUT_ANALOG) 1670 if (type != DCB_OUTPUT_ANALOG)
5770 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ 1671 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
5771 entry->or = or; 1672 entry->or = or;
5772} 1673}
5773 1674
5774static bool 1675static bool
5775parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, 1676parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5776 uint32_t conn, uint32_t conf, struct dcb_entry *entry) 1677 uint32_t conn, uint32_t conf, struct dcb_output *entry)
5777{ 1678{
1679 struct nouveau_drm *drm = nouveau_drm(dev);
1680
5778 entry->type = conn & 0xf; 1681 entry->type = conn & 0xf;
5779 entry->i2c_index = (conn >> 4) & 0xf; 1682 entry->i2c_index = (conn >> 4) & 0xf;
5780 entry->heads = (conn >> 8) & 0xf; 1683 entry->heads = (conn >> 8) & 0xf;
@@ -5784,7 +1687,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5784 entry->or = (conn >> 24) & 0xf; 1687 entry->or = (conn >> 24) & 0xf;
5785 1688
5786 switch (entry->type) { 1689 switch (entry->type) {
5787 case OUTPUT_ANALOG: 1690 case DCB_OUTPUT_ANALOG:
5788 /* 1691 /*
5789 * Although the rest of a CRT conf dword is usually 1692 * Although the rest of a CRT conf dword is usually
5790 * zeros, mac biosen have stuff there so we must mask 1693 * zeros, mac biosen have stuff there so we must mask
@@ -5793,7 +1696,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5793 (conf & 0xffff) * 10 : 1696 (conf & 0xffff) * 10 :
5794 (conf & 0xff) * 10000; 1697 (conf & 0xff) * 10000;
5795 break; 1698 break;
5796 case OUTPUT_LVDS: 1699 case DCB_OUTPUT_LVDS:
5797 { 1700 {
5798 uint32_t mask; 1701 uint32_t mask;
5799 if (conf & 0x1) 1702 if (conf & 0x1)
@@ -5828,12 +1731,12 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5828 if (dcb->version >= 0x40) 1731 if (dcb->version >= 0x40)
5829 break; 1732 break;
5830 1733
5831 NV_ERROR(dev, "Unknown LVDS configuration bits, " 1734 NV_ERROR(drm, "Unknown LVDS configuration bits, "
5832 "please report\n"); 1735 "please report\n");
5833 } 1736 }
5834 break; 1737 break;
5835 } 1738 }
5836 case OUTPUT_TV: 1739 case DCB_OUTPUT_TV:
5837 { 1740 {
5838 if (dcb->version >= 0x30) 1741 if (dcb->version >= 0x30)
5839 entry->tvconf.has_component_output = conf & (0x8 << 4); 1742 entry->tvconf.has_component_output = conf & (0x8 << 4);
@@ -5842,7 +1745,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5842 1745
5843 break; 1746 break;
5844 } 1747 }
5845 case OUTPUT_DP: 1748 case DCB_OUTPUT_DP:
5846 entry->dpconf.sor.link = (conf & 0x00000030) >> 4; 1749 entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
5847 switch ((conf & 0x00e00000) >> 21) { 1750 switch ((conf & 0x00e00000) >> 21) {
5848 case 0: 1751 case 0:
@@ -5864,7 +1767,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5864 break; 1767 break;
5865 } 1768 }
5866 break; 1769 break;
5867 case OUTPUT_TMDS: 1770 case DCB_OUTPUT_TMDS:
5868 if (dcb->version >= 0x40) 1771 if (dcb->version >= 0x40)
5869 entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4; 1772 entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
5870 else if (dcb->version >= 0x30) 1773 else if (dcb->version >= 0x30)
@@ -5873,7 +1776,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5873 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; 1776 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
5874 1777
5875 break; 1778 break;
5876 case OUTPUT_EOL: 1779 case DCB_OUTPUT_EOL:
5877 /* weird g80 mobile type that "nv" treats as a terminator */ 1780 /* weird g80 mobile type that "nv" treats as a terminator */
5878 dcb->entries--; 1781 dcb->entries--;
5879 return false; 1782 return false;
@@ -5900,27 +1803,29 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5900 1803
5901static bool 1804static bool
5902parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, 1805parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
5903 uint32_t conn, uint32_t conf, struct dcb_entry *entry) 1806 uint32_t conn, uint32_t conf, struct dcb_output *entry)
5904{ 1807{
1808 struct nouveau_drm *drm = nouveau_drm(dev);
1809
5905 switch (conn & 0x0000000f) { 1810 switch (conn & 0x0000000f) {
5906 case 0: 1811 case 0:
5907 entry->type = OUTPUT_ANALOG; 1812 entry->type = DCB_OUTPUT_ANALOG;
5908 break; 1813 break;
5909 case 1: 1814 case 1:
5910 entry->type = OUTPUT_TV; 1815 entry->type = DCB_OUTPUT_TV;
5911 break; 1816 break;
5912 case 2: 1817 case 2:
5913 case 4: 1818 case 4:
5914 if (conn & 0x10) 1819 if (conn & 0x10)
5915 entry->type = OUTPUT_LVDS; 1820 entry->type = DCB_OUTPUT_LVDS;
5916 else 1821 else
5917 entry->type = OUTPUT_TMDS; 1822 entry->type = DCB_OUTPUT_TMDS;
5918 break; 1823 break;
5919 case 3: 1824 case 3:
5920 entry->type = OUTPUT_LVDS; 1825 entry->type = DCB_OUTPUT_LVDS;
5921 break; 1826 break;
5922 default: 1827 default:
5923 NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f); 1828 NV_ERROR(drm, "Unknown DCB type %d\n", conn & 0x0000000f);
5924 return false; 1829 return false;
5925 } 1830 }
5926 1831
@@ -5932,13 +1837,13 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
5932 entry->duallink_possible = false; 1837 entry->duallink_possible = false;
5933 1838
5934 switch (entry->type) { 1839 switch (entry->type) {
5935 case OUTPUT_ANALOG: 1840 case DCB_OUTPUT_ANALOG:
5936 entry->crtconf.maxfreq = (conf & 0xffff) * 10; 1841 entry->crtconf.maxfreq = (conf & 0xffff) * 10;
5937 break; 1842 break;
5938 case OUTPUT_TV: 1843 case DCB_OUTPUT_TV:
5939 entry->tvconf.has_component_output = false; 1844 entry->tvconf.has_component_output = false;
5940 break; 1845 break;
5941 case OUTPUT_LVDS: 1846 case DCB_OUTPUT_LVDS:
5942 if ((conn & 0x00003f00) >> 8 != 0x10) 1847 if ((conn & 0x00003f00) >> 8 != 0x10)
5943 entry->lvdsconf.use_straps_for_mode = true; 1848 entry->lvdsconf.use_straps_for_mode = true;
5944 entry->lvdsconf.use_power_scripts = true; 1849 entry->lvdsconf.use_power_scripts = true;
@@ -5959,14 +1864,15 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
5959 * more options 1864 * more options
5960 */ 1865 */
5961 1866
1867 struct nouveau_drm *drm = nouveau_drm(dev);
5962 int i, newentries = 0; 1868 int i, newentries = 0;
5963 1869
5964 for (i = 0; i < dcb->entries; i++) { 1870 for (i = 0; i < dcb->entries; i++) {
5965 struct dcb_entry *ient = &dcb->entry[i]; 1871 struct dcb_output *ient = &dcb->entry[i];
5966 int j; 1872 int j;
5967 1873
5968 for (j = i + 1; j < dcb->entries; j++) { 1874 for (j = i + 1; j < dcb->entries; j++) {
5969 struct dcb_entry *jent = &dcb->entry[j]; 1875 struct dcb_output *jent = &dcb->entry[j];
5970 1876
5971 if (jent->type == 100) /* already merged entry */ 1877 if (jent->type == 100) /* already merged entry */
5972 continue; 1878 continue;
@@ -5976,7 +1882,7 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
5976 jent->type == ient->type && 1882 jent->type == ient->type &&
5977 jent->location == ient->location && 1883 jent->location == ient->location &&
5978 jent->or == ient->or) { 1884 jent->or == ient->or) {
5979 NV_TRACE(dev, "Merging DCB entries %d and %d\n", 1885 NV_INFO(drm, "Merging DCB entries %d and %d\n",
5980 i, j); 1886 i, j);
5981 ient->heads |= jent->heads; 1887 ient->heads |= jent->heads;
5982 jent->type = 100; /* dummy value */ 1888 jent->type = 100; /* dummy value */
@@ -6002,8 +1908,8 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
6002static bool 1908static bool
6003apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) 1909apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6004{ 1910{
6005 struct drm_nouveau_private *dev_priv = dev->dev_private; 1911 struct nouveau_drm *drm = nouveau_drm(dev);
6006 struct dcb_table *dcb = &dev_priv->vbios.dcb; 1912 struct dcb_table *dcb = &drm->vbios.dcb;
6007 1913
6008 /* Dell Precision M6300 1914 /* Dell Precision M6300
6009 * DCB entry 2: 02025312 00000010 1915 * DCB entry 2: 02025312 00000010
@@ -6029,7 +1935,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6029 */ 1935 */
6030 if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) { 1936 if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
6031 if (*conn == 0xf2005014 && *conf == 0xffffffff) { 1937 if (*conn == 0xf2005014 && *conf == 0xffffffff) {
6032 fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1); 1938 fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
6033 return false; 1939 return false;
6034 } 1940 }
6035 } 1941 }
@@ -6115,24 +2021,24 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
6115#ifdef __powerpc__ 2021#ifdef __powerpc__
6116 /* Apple iMac G4 NV17 */ 2022 /* Apple iMac G4 NV17 */
6117 if (of_machine_is_compatible("PowerMac4,5")) { 2023 if (of_machine_is_compatible("PowerMac4,5")) {
6118 fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1); 2024 fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
6119 fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2); 2025 fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
6120 return; 2026 return;
6121 } 2027 }
6122#endif 2028#endif
6123 2029
6124 /* Make up some sane defaults */ 2030 /* Make up some sane defaults */
6125 fabricate_dcb_output(dcb, OUTPUT_ANALOG, 2031 fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
6126 bios->legacy.i2c_indices.crt, 1, 1); 2032 bios->legacy.i2c_indices.crt, 1, 1);
6127 2033
6128 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0) 2034 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
6129 fabricate_dcb_output(dcb, OUTPUT_TV, 2035 fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
6130 bios->legacy.i2c_indices.tv, 2036 bios->legacy.i2c_indices.tv,
6131 all_heads, 0); 2037 all_heads, 0);
6132 2038
6133 else if (bios->tmds.output0_script_ptr || 2039 else if (bios->tmds.output0_script_ptr ||
6134 bios->tmds.output1_script_ptr) 2040 bios->tmds.output1_script_ptr)
6135 fabricate_dcb_output(dcb, OUTPUT_TMDS, 2041 fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
6136 bios->legacy.i2c_indices.panel, 2042 bios->legacy.i2c_indices.panel,
6137 all_heads, 1); 2043 all_heads, 1);
6138} 2044}
@@ -6140,16 +2046,16 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
6140static int 2046static int
6141parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp) 2047parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
6142{ 2048{
6143 struct drm_nouveau_private *dev_priv = dev->dev_private; 2049 struct nouveau_drm *drm = nouveau_drm(dev);
6144 struct dcb_table *dcb = &dev_priv->vbios.dcb; 2050 struct dcb_table *dcb = &drm->vbios.dcb;
6145 u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]); 2051 u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
6146 u32 conn = ROM32(outp[0]); 2052 u32 conn = ROM32(outp[0]);
6147 bool ret; 2053 bool ret;
6148 2054
6149 if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) { 2055 if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
6150 struct dcb_entry *entry = new_dcb_entry(dcb); 2056 struct dcb_output *entry = new_dcb_entry(dcb);
6151 2057
6152 NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf); 2058 NV_INFO(drm, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
6153 2059
6154 if (dcb->version >= 0x20) 2060 if (dcb->version >= 0x20)
6155 ret = parse_dcb20_entry(dev, dcb, conn, conf, entry); 2061 ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
@@ -6162,7 +2068,7 @@ parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
6162 * are cards with bogus values (nv31m in bug 23212), 2068 * are cards with bogus values (nv31m in bug 23212),
6163 * and it's otherwise useless. 2069 * and it's otherwise useless.
6164 */ 2070 */
6165 if (entry->type == OUTPUT_TV && 2071 if (entry->type == DCB_OUTPUT_TV &&
6166 entry->location == DCB_LOC_ON_CHIP) 2072 entry->location == DCB_LOC_ON_CHIP)
6167 entry->i2c_index = 0x0f; 2073 entry->i2c_index = 0x0f;
6168 } 2074 }
@@ -6210,7 +2116,7 @@ dcb_fake_connectors(struct nvbios *bios)
6210 * table - just in case it has random, rather than stub, entries. 2116 * table - just in case it has random, rather than stub, entries.
6211 */ 2117 */
6212 if (i > 1) { 2118 if (i > 1) {
6213 u8 *conntab = dcb_conntab(bios->dev); 2119 u8 *conntab = olddcb_conntab(bios->dev);
6214 if (conntab) 2120 if (conntab)
6215 conntab[0] = 0x00; 2121 conntab[0] = 0x00;
6216 } 2122 }
@@ -6219,11 +2125,12 @@ dcb_fake_connectors(struct nvbios *bios)
6219static int 2125static int
6220parse_dcb_table(struct drm_device *dev, struct nvbios *bios) 2126parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
6221{ 2127{
2128 struct nouveau_drm *drm = nouveau_drm(dev);
6222 struct dcb_table *dcb = &bios->dcb; 2129 struct dcb_table *dcb = &bios->dcb;
6223 u8 *dcbt, *conn; 2130 u8 *dcbt, *conn;
6224 int idx; 2131 int idx;
6225 2132
6226 dcbt = dcb_table(dev); 2133 dcbt = olddcb_table(dev);
6227 if (!dcbt) { 2134 if (!dcbt) {
6228 /* handle pre-DCB boards */ 2135 /* handle pre-DCB boards */
6229 if (bios->type == NVBIOS_BMP) { 2136 if (bios->type == NVBIOS_BMP) {
@@ -6234,10 +2141,10 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
6234 return -EINVAL; 2141 return -EINVAL;
6235 } 2142 }
6236 2143
6237 NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf); 2144 NV_INFO(drm, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
6238 2145
6239 dcb->version = dcbt[0]; 2146 dcb->version = dcbt[0];
6240 dcb_outp_foreach(dev, NULL, parse_dcb_entry); 2147 olddcb_outp_foreach(dev, NULL, parse_dcb_entry);
6241 2148
6242 /* 2149 /*
6243 * apart for v2.1+ not being known for requiring merging, this 2150 * apart for v2.1+ not being known for requiring merging, this
@@ -6251,10 +2158,10 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
6251 2158
6252 /* dump connector table entries to log, if any exist */ 2159 /* dump connector table entries to log, if any exist */
6253 idx = -1; 2160 idx = -1;
6254 while ((conn = dcb_conn(dev, ++idx))) { 2161 while ((conn = olddcb_conn(dev, ++idx))) {
6255 if (conn[0] != 0xff) { 2162 if (conn[0] != 0xff) {
6256 NV_TRACE(dev, "DCB conn %02d: ", idx); 2163 NV_INFO(drm, "DCB conn %02d: ", idx);
6257 if (dcb_conntab(dev)[3] < 4) 2164 if (olddcb_conntab(dev)[3] < 4)
6258 printk("%04x\n", ROM16(conn[0])); 2165 printk("%04x\n", ROM16(conn[0]));
6259 else 2166 else
6260 printk("%08x\n", ROM32(conn[0])); 2167 printk("%08x\n", ROM32(conn[0]));
@@ -6275,12 +2182,14 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
6275 * starting at reg 0x00001400 2182 * starting at reg 0x00001400
6276 */ 2183 */
6277 2184
2185 struct nouveau_drm *drm = nouveau_drm(dev);
2186 struct nouveau_device *device = nv_device(drm->device);
6278 uint8_t bytes_to_write; 2187 uint8_t bytes_to_write;
6279 uint16_t hwsq_entry_offset; 2188 uint16_t hwsq_entry_offset;
6280 int i; 2189 int i;
6281 2190
6282 if (bios->data[hwsq_offset] <= entry) { 2191 if (bios->data[hwsq_offset] <= entry) {
6283 NV_ERROR(dev, "Too few entries in HW sequencer table for " 2192 NV_ERROR(drm, "Too few entries in HW sequencer table for "
6284 "requested entry\n"); 2193 "requested entry\n");
6285 return -ENOENT; 2194 return -ENOENT;
6286 } 2195 }
@@ -6288,24 +2197,24 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
6288 bytes_to_write = bios->data[hwsq_offset + 1]; 2197 bytes_to_write = bios->data[hwsq_offset + 1];
6289 2198
6290 if (bytes_to_write != 36) { 2199 if (bytes_to_write != 36) {
6291 NV_ERROR(dev, "Unknown HW sequencer entry size\n"); 2200 NV_ERROR(drm, "Unknown HW sequencer entry size\n");
6292 return -EINVAL; 2201 return -EINVAL;
6293 } 2202 }
6294 2203
6295 NV_TRACE(dev, "Loading NV17 power sequencing microcode\n"); 2204 NV_INFO(drm, "Loading NV17 power sequencing microcode\n");
6296 2205
6297 hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write; 2206 hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
6298 2207
6299 /* set sequencer control */ 2208 /* set sequencer control */
6300 bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset])); 2209 nv_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
6301 bytes_to_write -= 4; 2210 bytes_to_write -= 4;
6302 2211
6303 /* write ucode */ 2212 /* write ucode */
6304 for (i = 0; i < bytes_to_write; i += 4) 2213 for (i = 0; i < bytes_to_write; i += 4)
6305 bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4])); 2214 nv_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
6306 2215
6307 /* twiddle NV_PBUS_DEBUG_4 */ 2216 /* twiddle NV_PBUS_DEBUG_4 */
6308 bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18); 2217 nv_wr32(device, NV_PBUS_DEBUG_4, nv_rd32(device, NV_PBUS_DEBUG_4) | 0x18);
6309 2218
6310 return 0; 2219 return 0;
6311} 2220}
@@ -6336,8 +2245,8 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
6336 2245
6337uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) 2246uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
6338{ 2247{
6339 struct drm_nouveau_private *dev_priv = dev->dev_private; 2248 struct nouveau_drm *drm = nouveau_drm(dev);
6340 struct nvbios *bios = &dev_priv->vbios; 2249 struct nvbios *bios = &drm->vbios;
6341 const uint8_t edid_sig[] = { 2250 const uint8_t edid_sig[] = {
6342 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 2251 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
6343 uint16_t offset = 0; 2252 uint16_t offset = 0;
@@ -6360,53 +2269,29 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
6360 offset++; 2269 offset++;
6361 } 2270 }
6362 2271
6363 NV_TRACE(dev, "Found EDID in BIOS\n"); 2272 NV_INFO(drm, "Found EDID in BIOS\n");
6364 2273
6365 return bios->fp.edid = &bios->data[offset]; 2274 return bios->fp.edid = &bios->data[offset];
6366} 2275}
6367 2276
6368void
6369nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
6370 struct dcb_entry *dcbent, int crtc)
6371{
6372 struct drm_nouveau_private *dev_priv = dev->dev_private;
6373 struct nvbios *bios = &dev_priv->vbios;
6374 struct init_exec iexec = { true, false };
6375
6376 spin_lock_bh(&bios->lock);
6377 bios->display.output = dcbent;
6378 bios->display.crtc = crtc;
6379 parse_init_table(bios, table, &iexec);
6380 bios->display.output = NULL;
6381 spin_unlock_bh(&bios->lock);
6382}
6383
6384void
6385nouveau_bios_init_exec(struct drm_device *dev, uint16_t table)
6386{
6387 struct drm_nouveau_private *dev_priv = dev->dev_private;
6388 struct nvbios *bios = &dev_priv->vbios;
6389 struct init_exec iexec = { true, false };
6390
6391 parse_init_table(bios, table, &iexec);
6392}
6393
6394static bool NVInitVBIOS(struct drm_device *dev) 2277static bool NVInitVBIOS(struct drm_device *dev)
6395{ 2278{
6396 struct drm_nouveau_private *dev_priv = dev->dev_private; 2279 struct nouveau_drm *drm = nouveau_drm(dev);
6397 struct nvbios *bios = &dev_priv->vbios; 2280 struct nvbios *bios = &drm->vbios;
6398 2281
6399 memset(bios, 0, sizeof(struct nvbios)); 2282 memset(bios, 0, sizeof(struct nvbios));
6400 spin_lock_init(&bios->lock); 2283 spin_lock_init(&bios->lock);
6401 bios->dev = dev; 2284 bios->dev = dev;
6402 2285
6403 return bios_shadow(dev); 2286 bios->data = nouveau_bios(drm->device)->data;
2287 bios->length = nouveau_bios(drm->device)->size;
2288 return true;
6404} 2289}
6405 2290
6406static int nouveau_parse_vbios_struct(struct drm_device *dev) 2291static int nouveau_parse_vbios_struct(struct drm_device *dev)
6407{ 2292{
6408 struct drm_nouveau_private *dev_priv = dev->dev_private; 2293 struct nouveau_drm *drm = nouveau_drm(dev);
6409 struct nvbios *bios = &dev_priv->vbios; 2294 struct nvbios *bios = &drm->vbios;
6410 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' }; 2295 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
6411 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 }; 2296 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
6412 int offset; 2297 int offset;
@@ -6414,7 +2299,7 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
6414 offset = findstr(bios->data, bios->length, 2299 offset = findstr(bios->data, bios->length,
6415 bit_signature, sizeof(bit_signature)); 2300 bit_signature, sizeof(bit_signature));
6416 if (offset) { 2301 if (offset) {
6417 NV_TRACE(dev, "BIT BIOS found\n"); 2302 NV_INFO(drm, "BIT BIOS found\n");
6418 bios->type = NVBIOS_BIT; 2303 bios->type = NVBIOS_BIT;
6419 bios->offset = offset; 2304 bios->offset = offset;
6420 return parse_bit_structure(bios, offset + 6); 2305 return parse_bit_structure(bios, offset + 6);
@@ -6423,21 +2308,21 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
6423 offset = findstr(bios->data, bios->length, 2308 offset = findstr(bios->data, bios->length,
6424 bmp_signature, sizeof(bmp_signature)); 2309 bmp_signature, sizeof(bmp_signature));
6425 if (offset) { 2310 if (offset) {
6426 NV_TRACE(dev, "BMP BIOS found\n"); 2311 NV_INFO(drm, "BMP BIOS found\n");
6427 bios->type = NVBIOS_BMP; 2312 bios->type = NVBIOS_BMP;
6428 bios->offset = offset; 2313 bios->offset = offset;
6429 return parse_bmp_structure(dev, bios, offset); 2314 return parse_bmp_structure(dev, bios, offset);
6430 } 2315 }
6431 2316
6432 NV_ERROR(dev, "No known BIOS signature found\n"); 2317 NV_ERROR(drm, "No known BIOS signature found\n");
6433 return -ENODEV; 2318 return -ENODEV;
6434} 2319}
6435 2320
6436int 2321int
6437nouveau_run_vbios_init(struct drm_device *dev) 2322nouveau_run_vbios_init(struct drm_device *dev)
6438{ 2323{
6439 struct drm_nouveau_private *dev_priv = dev->dev_private; 2324 struct nouveau_drm *drm = nouveau_drm(dev);
6440 struct nvbios *bios = &dev_priv->vbios; 2325 struct nvbios *bios = &drm->vbios;
6441 int i, ret = 0; 2326 int i, ret = 0;
6442 2327
6443 /* Reset the BIOS head to 0. */ 2328 /* Reset the BIOS head to 0. */
@@ -6451,23 +2336,8 @@ nouveau_run_vbios_init(struct drm_device *dev)
6451 bios->fp.lvds_init_run = false; 2336 bios->fp.lvds_init_run = false;
6452 } 2337 }
6453 2338
6454 parse_init_tables(bios); 2339 if (nv_device(drm->device)->card_type >= NV_50) {
6455 2340 for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
6456 /*
6457 * Runs some additional script seen on G8x VBIOSen. The VBIOS'
6458 * parser will run this right after the init tables, the binary
6459 * driver appears to run it at some point later.
6460 */
6461 if (bios->some_script_ptr) {
6462 struct init_exec iexec = {true, false};
6463
6464 NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n",
6465 bios->some_script_ptr);
6466 parse_init_table(bios, bios->some_script_ptr, &iexec);
6467 }
6468
6469 if (dev_priv->card_type >= NV_50) {
6470 for (i = 0; i < bios->dcb.entries; i++) {
6471 nouveau_bios_run_display_table(dev, 0, 0, 2341 nouveau_bios_run_display_table(dev, 0, 0,
6472 &bios->dcb.entry[i], -1); 2342 &bios->dcb.entry[i], -1);
6473 } 2343 }
@@ -6479,10 +2349,10 @@ nouveau_run_vbios_init(struct drm_device *dev)
6479static bool 2349static bool
6480nouveau_bios_posted(struct drm_device *dev) 2350nouveau_bios_posted(struct drm_device *dev)
6481{ 2351{
6482 struct drm_nouveau_private *dev_priv = dev->dev_private; 2352 struct nouveau_drm *drm = nouveau_drm(dev);
6483 unsigned htotal; 2353 unsigned htotal;
6484 2354
6485 if (dev_priv->card_type >= NV_50) { 2355 if (nv_device(drm->device)->card_type >= NV_50) {
6486 if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && 2356 if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
6487 NVReadVgaCrtc(dev, 0, 0x1a) == 0) 2357 NVReadVgaCrtc(dev, 0, 0x1a) == 0)
6488 return false; 2358 return false;
@@ -6501,8 +2371,8 @@ nouveau_bios_posted(struct drm_device *dev)
6501int 2371int
6502nouveau_bios_init(struct drm_device *dev) 2372nouveau_bios_init(struct drm_device *dev)
6503{ 2373{
6504 struct drm_nouveau_private *dev_priv = dev->dev_private; 2374 struct nouveau_drm *drm = nouveau_drm(dev);
6505 struct nvbios *bios = &dev_priv->vbios; 2375 struct nvbios *bios = &drm->vbios;
6506 int ret; 2376 int ret;
6507 2377
6508 if (!NVInitVBIOS(dev)) 2378 if (!NVInitVBIOS(dev))
@@ -6512,14 +2382,6 @@ nouveau_bios_init(struct drm_device *dev)
6512 if (ret) 2382 if (ret)
6513 return ret; 2383 return ret;
6514 2384
6515 ret = nouveau_i2c_init(dev);
6516 if (ret)
6517 return ret;
6518
6519 ret = nouveau_mxm_init(dev);
6520 if (ret)
6521 return ret;
6522
6523 ret = parse_dcb_table(dev, bios); 2385 ret = parse_dcb_table(dev, bios);
6524 if (ret) 2386 if (ret)
6525 return ret; 2387 return ret;
@@ -6532,12 +2394,10 @@ nouveau_bios_init(struct drm_device *dev)
6532 2394
6533 /* ... unless card isn't POSTed already */ 2395 /* ... unless card isn't POSTed already */
6534 if (!nouveau_bios_posted(dev)) { 2396 if (!nouveau_bios_posted(dev)) {
6535 NV_INFO(dev, "Adaptor not initialised, " 2397 NV_INFO(drm, "Adaptor not initialised, "
6536 "running VBIOS init tables.\n"); 2398 "running VBIOS init tables.\n");
6537 bios->execute = true; 2399 bios->execute = true;
6538 } 2400 }
6539 if (nouveau_force_post)
6540 bios->execute = true;
6541 2401
6542 ret = nouveau_run_vbios_init(dev); 2402 ret = nouveau_run_vbios_init(dev);
6543 if (ret) 2403 if (ret)
@@ -6560,10 +2420,4 @@ nouveau_bios_init(struct drm_device *dev)
6560void 2420void
6561nouveau_bios_takedown(struct drm_device *dev) 2421nouveau_bios_takedown(struct drm_device *dev)
6562{ 2422{
6563 struct drm_nouveau_private *dev_priv = dev->dev_private;
6564
6565 nouveau_mxm_fini(dev);
6566 nouveau_i2c_fini(dev);
6567
6568 kfree(dev_priv->vbios.data);
6569} 2423}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 298a3af48d14..3befbb821a56 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -21,11 +21,10 @@
21 * DEALINGS IN THE SOFTWARE. 21 * DEALINGS IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifndef __NOUVEAU_BIOS_H__ 24#ifndef __NOUVEAU_DISPBIOS_H__
25#define __NOUVEAU_BIOS_H__ 25#define __NOUVEAU_DISPBIOS_H__
26 26
27#include "nvreg.h" 27#include "nvreg.h"
28#include "nouveau_i2c.h"
29 28
30#define DCB_MAX_NUM_ENTRIES 16 29#define DCB_MAX_NUM_ENTRIES 16
31#define DCB_MAX_NUM_I2C_ENTRIES 16 30#define DCB_MAX_NUM_I2C_ENTRIES 16
@@ -39,8 +38,8 @@
39#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); }) 38#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
40#define ROM64(x) le64_to_cpu(*(u64 *)&(x)) 39#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
41#define ROMPTR(d,x) ({ \ 40#define ROMPTR(d,x) ({ \
42 struct drm_nouveau_private *dev_priv = (d)->dev_private; \ 41 struct nouveau_drm *drm = nouveau_drm((d)); \
43 ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL; \ 42 ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \
44}) 43})
45 44
46struct bit_entry { 45struct bit_entry {
@@ -53,95 +52,19 @@ struct bit_entry {
53 52
54int bit_table(struct drm_device *, u8 id, struct bit_entry *); 53int bit_table(struct drm_device *, u8 id, struct bit_entry *);
55 54
56enum dcb_gpio_tag { 55#include <subdev/bios/dcb.h>
57 DCB_GPIO_PANEL_POWER = 0x01, 56#include <subdev/bios/conn.h>
58 DCB_GPIO_TVDAC0 = 0x0c,
59 DCB_GPIO_TVDAC1 = 0x2d,
60 DCB_GPIO_PWM_FAN = 0x09,
61 DCB_GPIO_FAN_SENSE = 0x3d,
62 DCB_GPIO_UNUSED = 0xff
63};
64
65enum dcb_connector_type {
66 DCB_CONNECTOR_VGA = 0x00,
67 DCB_CONNECTOR_TV_0 = 0x10,
68 DCB_CONNECTOR_TV_1 = 0x11,
69 DCB_CONNECTOR_TV_3 = 0x13,
70 DCB_CONNECTOR_DVI_I = 0x30,
71 DCB_CONNECTOR_DVI_D = 0x31,
72 DCB_CONNECTOR_DMS59_0 = 0x38,
73 DCB_CONNECTOR_DMS59_1 = 0x39,
74 DCB_CONNECTOR_LVDS = 0x40,
75 DCB_CONNECTOR_LVDS_SPWG = 0x41,
76 DCB_CONNECTOR_DP = 0x46,
77 DCB_CONNECTOR_eDP = 0x47,
78 DCB_CONNECTOR_HDMI_0 = 0x60,
79 DCB_CONNECTOR_HDMI_1 = 0x61,
80 DCB_CONNECTOR_DMS59_DP0 = 0x64,
81 DCB_CONNECTOR_DMS59_DP1 = 0x65,
82 DCB_CONNECTOR_NONE = 0xff
83};
84
85enum dcb_type {
86 OUTPUT_ANALOG = 0,
87 OUTPUT_TV = 1,
88 OUTPUT_TMDS = 2,
89 OUTPUT_LVDS = 3,
90 OUTPUT_DP = 6,
91 OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
92 OUTPUT_UNUSED = 15,
93 OUTPUT_ANY = -1
94};
95
96struct dcb_entry {
97 int index; /* may not be raw dcb index if merging has happened */
98 enum dcb_type type;
99 uint8_t i2c_index;
100 uint8_t heads;
101 uint8_t connector;
102 uint8_t bus;
103 uint8_t location;
104 uint8_t or;
105 bool duallink_possible;
106 union {
107 struct sor_conf {
108 int link;
109 } sorconf;
110 struct {
111 int maxfreq;
112 } crtconf;
113 struct {
114 struct sor_conf sor;
115 bool use_straps_for_mode;
116 bool use_acpi_for_edid;
117 bool use_power_scripts;
118 } lvdsconf;
119 struct {
120 bool has_component_output;
121 } tvconf;
122 struct {
123 struct sor_conf sor;
124 int link_nr;
125 int link_bw;
126 } dpconf;
127 struct {
128 struct sor_conf sor;
129 int slave_addr;
130 } tmdsconf;
131 };
132 bool i2c_upper_default;
133};
134 57
135struct dcb_table { 58struct dcb_table {
136 uint8_t version; 59 uint8_t version;
137 int entries; 60 int entries;
138 struct dcb_entry entry[DCB_MAX_NUM_ENTRIES]; 61 struct dcb_output entry[DCB_MAX_NUM_ENTRIES];
139}; 62};
140 63
141enum nouveau_or { 64enum nouveau_or {
142 OUTPUT_A = (1 << 0), 65 DCB_OUTPUT_A = (1 << 0),
143 OUTPUT_B = (1 << 1), 66 DCB_OUTPUT_B = (1 << 1),
144 OUTPUT_C = (1 << 2) 67 DCB_OUTPUT_C = (1 << 2)
145}; 68};
146 69
147enum LVDS_script { 70enum LVDS_script {
@@ -154,58 +77,6 @@ enum LVDS_script {
154 LVDS_PANEL_OFF 77 LVDS_PANEL_OFF
155}; 78};
156 79
157/* these match types in pll limits table version 0x40,
158 * nouveau uses them on all chipsets internally where a
159 * specific pll needs to be referenced, but the exact
160 * register isn't known.
161 */
162enum pll_types {
163 PLL_CORE = 0x01,
164 PLL_SHADER = 0x02,
165 PLL_UNK03 = 0x03,
166 PLL_MEMORY = 0x04,
167 PLL_VDEC = 0x05,
168 PLL_UNK40 = 0x40,
169 PLL_UNK41 = 0x41,
170 PLL_UNK42 = 0x42,
171 PLL_VPLL0 = 0x80,
172 PLL_VPLL1 = 0x81,
173 PLL_MAX = 0xff
174};
175
176struct pll_lims {
177 u32 reg;
178
179 struct {
180 int minfreq;
181 int maxfreq;
182 int min_inputfreq;
183 int max_inputfreq;
184
185 uint8_t min_m;
186 uint8_t max_m;
187 uint8_t min_n;
188 uint8_t max_n;
189 } vco1, vco2;
190
191 uint8_t max_log2p;
192 /*
193 * for most pre nv50 cards setting a log2P of 7 (the common max_log2p
194 * value) is no different to 6 (at least for vplls) so allowing the MNP
195 * calc to use 7 causes the generated clock to be out by a factor of 2.
196 * however, max_log2p cannot be fixed-up during parsing as the
197 * unmodified max_log2p value is still needed for setting mplls, hence
198 * an additional max_usable_log2p member
199 */
200 uint8_t max_usable_log2p;
201 uint8_t log2p_bias;
202
203 uint8_t min_p;
204 uint8_t max_p;
205
206 int refclk;
207};
208
209struct nvbios { 80struct nvbios {
210 struct drm_device *dev; 81 struct drm_device *dev;
211 enum { 82 enum {
@@ -257,7 +128,7 @@ struct nvbios {
257 } state; 128 } state;
258 129
259 struct { 130 struct {
260 struct dcb_entry *output; 131 struct dcb_output *output;
261 int crtc; 132 int crtc;
262 uint16_t script_table_ptr; 133 uint16_t script_table_ptr;
263 } display; 134 } display;
@@ -302,11 +173,28 @@ struct nvbios {
302 } legacy; 173 } legacy;
303}; 174};
304 175
305void *dcb_table(struct drm_device *); 176void *olddcb_table(struct drm_device *);
306void *dcb_outp(struct drm_device *, u8 idx); 177void *olddcb_outp(struct drm_device *, u8 idx);
307int dcb_outp_foreach(struct drm_device *, void *data, 178int olddcb_outp_foreach(struct drm_device *, void *data,
308 int (*)(struct drm_device *, void *, int idx, u8 *outp)); 179 int (*)(struct drm_device *, void *, int idx, u8 *outp));
309u8 *dcb_conntab(struct drm_device *); 180u8 *olddcb_conntab(struct drm_device *);
310u8 *dcb_conn(struct drm_device *, u8 idx); 181u8 *olddcb_conn(struct drm_device *, u8 idx);
182
183int nouveau_bios_init(struct drm_device *);
184void nouveau_bios_takedown(struct drm_device *dev);
185int nouveau_run_vbios_init(struct drm_device *);
186struct dcb_connector_table_entry *
187nouveau_bios_connector_entry(struct drm_device *, int index);
188int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
189 struct dcb_output *, int crtc);
190bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
191uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
192int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
193 bool *dl, bool *if_is_24bit);
194int run_tmds_table(struct drm_device *, struct dcb_output *,
195 int head, int pxclk);
196int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
197 enum LVDS_script, int pxclk);
198bool bios_encoder_match(struct dcb_output *, u32 hash);
311 199
312#endif 200#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7f80ed523562..259e5f1adf47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,31 +27,127 @@
27 * Jeremy Kolb <jkolb@brandeis.edu> 27 * Jeremy Kolb <jkolb@brandeis.edu>
28 */ 28 */
29 29
30#include "drmP.h" 30#include <core/engine.h>
31#include "ttm/ttm_page_alloc.h" 31
32#include <subdev/fb.h>
33#include <subdev/vm.h>
34#include <subdev/bar.h>
32 35
33#include "nouveau_drm.h" 36#include "nouveau_drm.h"
34#include "nouveau_drv.h"
35#include "nouveau_dma.h" 37#include "nouveau_dma.h"
36#include "nouveau_mm.h"
37#include "nouveau_vm.h"
38#include "nouveau_fence.h" 38#include "nouveau_fence.h"
39#include "nouveau_ramht.h"
40 39
41#include <linux/log2.h> 40#include "nouveau_bo.h"
42#include <linux/slab.h> 41#include "nouveau_ttm.h"
42#include "nouveau_gem.h"
43
44/*
45 * NV10-NV40 tiling helpers
46 */
47
48static void
49nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
50 u32 addr, u32 size, u32 pitch, u32 flags)
51{
52 struct nouveau_drm *drm = nouveau_drm(dev);
53 int i = reg - drm->tile.reg;
54 struct nouveau_fb *pfb = nouveau_fb(drm->device);
55 struct nouveau_fb_tile *tile = &pfb->tile.region[i];
56 struct nouveau_engine *engine;
57
58 nouveau_fence_unref(&reg->fence);
59
60 if (tile->pitch)
61 pfb->tile.fini(pfb, i, tile);
62
63 if (pitch)
64 pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
65
66 pfb->tile.prog(pfb, i, tile);
67
68 if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
69 engine->tile_prog(engine, i);
70 if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
71 engine->tile_prog(engine, i);
72}
73
74static struct nouveau_drm_tile *
75nv10_bo_get_tile_region(struct drm_device *dev, int i)
76{
77 struct nouveau_drm *drm = nouveau_drm(dev);
78 struct nouveau_drm_tile *tile = &drm->tile.reg[i];
79
80 spin_lock(&drm->tile.lock);
81
82 if (!tile->used &&
83 (!tile->fence || nouveau_fence_done(tile->fence)))
84 tile->used = true;
85 else
86 tile = NULL;
87
88 spin_unlock(&drm->tile.lock);
89 return tile;
90}
91
92static void
93nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
94 struct nouveau_fence *fence)
95{
96 struct nouveau_drm *drm = nouveau_drm(dev);
97
98 if (tile) {
99 spin_lock(&drm->tile.lock);
100 if (fence) {
101 /* Mark it as pending. */
102 tile->fence = fence;
103 nouveau_fence_ref(fence);
104 }
105
106 tile->used = false;
107 spin_unlock(&drm->tile.lock);
108 }
109}
110
111static struct nouveau_drm_tile *
112nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
113 u32 size, u32 pitch, u32 flags)
114{
115 struct nouveau_drm *drm = nouveau_drm(dev);
116 struct nouveau_fb *pfb = nouveau_fb(drm->device);
117 struct nouveau_drm_tile *tile, *found = NULL;
118 int i;
119
120 for (i = 0; i < pfb->tile.regions; i++) {
121 tile = nv10_bo_get_tile_region(dev, i);
122
123 if (pitch && !found) {
124 found = tile;
125 continue;
126
127 } else if (tile && pfb->tile.region[i].pitch) {
128 /* Kill an unused tile region. */
129 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
130 }
131
132 nv10_bo_put_tile_region(dev, tile, NULL);
133 }
134
135 if (found)
136 nv10_bo_update_tile_region(dev, found, addr, size,
137 pitch, flags);
138 return found;
139}
43 140
44static void 141static void
45nouveau_bo_del_ttm(struct ttm_buffer_object *bo) 142nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
46{ 143{
47 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 144 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
48 struct drm_device *dev = dev_priv->dev; 145 struct drm_device *dev = drm->dev;
49 struct nouveau_bo *nvbo = nouveau_bo(bo); 146 struct nouveau_bo *nvbo = nouveau_bo(bo);
50 147
51 if (unlikely(nvbo->gem)) 148 if (unlikely(nvbo->gem))
52 DRM_ERROR("bo %p still attached to GEM object\n", bo); 149 DRM_ERROR("bo %p still attached to GEM object\n", bo);
53 150 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
54 nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
55 kfree(nvbo); 151 kfree(nvbo);
56} 152}
57 153
@@ -59,23 +155,24 @@ static void
59nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, 155nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
60 int *align, int *size) 156 int *align, int *size)
61{ 157{
62 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 158 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
159 struct nouveau_device *device = nv_device(drm->device);
63 160
64 if (dev_priv->card_type < NV_50) { 161 if (device->card_type < NV_50) {
65 if (nvbo->tile_mode) { 162 if (nvbo->tile_mode) {
66 if (dev_priv->chipset >= 0x40) { 163 if (device->chipset >= 0x40) {
67 *align = 65536; 164 *align = 65536;
68 *size = roundup(*size, 64 * nvbo->tile_mode); 165 *size = roundup(*size, 64 * nvbo->tile_mode);
69 166
70 } else if (dev_priv->chipset >= 0x30) { 167 } else if (device->chipset >= 0x30) {
71 *align = 32768; 168 *align = 32768;
72 *size = roundup(*size, 64 * nvbo->tile_mode); 169 *size = roundup(*size, 64 * nvbo->tile_mode);
73 170
74 } else if (dev_priv->chipset >= 0x20) { 171 } else if (device->chipset >= 0x20) {
75 *align = 16384; 172 *align = 16384;
76 *size = roundup(*size, 64 * nvbo->tile_mode); 173 *size = roundup(*size, 64 * nvbo->tile_mode);
77 174
78 } else if (dev_priv->chipset >= 0x10) { 175 } else if (device->chipset >= 0x10) {
79 *align = 16384; 176 *align = 16384;
80 *size = roundup(*size, 32 * nvbo->tile_mode); 177 *size = roundup(*size, 32 * nvbo->tile_mode);
81 } 178 }
@@ -94,7 +191,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
94 struct sg_table *sg, 191 struct sg_table *sg,
95 struct nouveau_bo **pnvbo) 192 struct nouveau_bo **pnvbo)
96{ 193{
97 struct drm_nouveau_private *dev_priv = dev->dev_private; 194 struct nouveau_drm *drm = nouveau_drm(dev);
98 struct nouveau_bo *nvbo; 195 struct nouveau_bo *nvbo;
99 size_t acc_size; 196 size_t acc_size;
100 int ret; 197 int ret;
@@ -111,22 +208,22 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
111 INIT_LIST_HEAD(&nvbo->vma_list); 208 INIT_LIST_HEAD(&nvbo->vma_list);
112 nvbo->tile_mode = tile_mode; 209 nvbo->tile_mode = tile_mode;
113 nvbo->tile_flags = tile_flags; 210 nvbo->tile_flags = tile_flags;
114 nvbo->bo.bdev = &dev_priv->ttm.bdev; 211 nvbo->bo.bdev = &drm->ttm.bdev;
115 212
116 nvbo->page_shift = 12; 213 nvbo->page_shift = 12;
117 if (dev_priv->bar1_vm) { 214 if (drm->client.base.vm) {
118 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) 215 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
119 nvbo->page_shift = dev_priv->bar1_vm->lpg_shift; 216 nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
120 } 217 }
121 218
122 nouveau_bo_fixup_align(nvbo, flags, &align, &size); 219 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
123 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; 220 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
124 nouveau_bo_placement_set(nvbo, flags, 0); 221 nouveau_bo_placement_set(nvbo, flags, 0);
125 222
126 acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size, 223 acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
127 sizeof(struct nouveau_bo)); 224 sizeof(struct nouveau_bo));
128 225
129 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 226 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
130 type, &nvbo->placement, 227 type, &nvbo->placement,
131 align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg, 228 align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
132 nouveau_bo_del_ttm); 229 nouveau_bo_del_ttm);
@@ -155,10 +252,11 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
155static void 252static void
156set_placement_range(struct nouveau_bo *nvbo, uint32_t type) 253set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
157{ 254{
158 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 255 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
159 int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; 256 struct nouveau_fb *pfb = nouveau_fb(drm->device);
257 u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;
160 258
161 if (dev_priv->card_type == NV_10 && 259 if (nv_device(drm->device)->card_type == NV_10 &&
162 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 260 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
163 nvbo->bo.mem.num_pages < vram_pages / 4) { 261 nvbo->bo.mem.num_pages < vram_pages / 4) {
164 /* 262 /*
@@ -198,13 +296,12 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
198int 296int
199nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) 297nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
200{ 298{
201 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 299 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
202 struct ttm_buffer_object *bo = &nvbo->bo; 300 struct ttm_buffer_object *bo = &nvbo->bo;
203 int ret; 301 int ret;
204 302
205 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { 303 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
206 NV_ERROR(nouveau_bdev(bo->bdev)->dev, 304 NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
207 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
208 1 << bo->mem.mem_type, memtype); 305 1 << bo->mem.mem_type, memtype);
209 return -EINVAL; 306 return -EINVAL;
210 } 307 }
@@ -222,10 +319,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
222 if (ret == 0) { 319 if (ret == 0) {
223 switch (bo->mem.mem_type) { 320 switch (bo->mem.mem_type) {
224 case TTM_PL_VRAM: 321 case TTM_PL_VRAM:
225 dev_priv->fb_aper_free -= bo->mem.size; 322 drm->gem.vram_available -= bo->mem.size;
226 break; 323 break;
227 case TTM_PL_TT: 324 case TTM_PL_TT:
228 dev_priv->gart_info.aper_free -= bo->mem.size; 325 drm->gem.gart_available -= bo->mem.size;
229 break; 326 break;
230 default: 327 default:
231 break; 328 break;
@@ -241,7 +338,7 @@ out:
241int 338int
242nouveau_bo_unpin(struct nouveau_bo *nvbo) 339nouveau_bo_unpin(struct nouveau_bo *nvbo)
243{ 340{
244 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 341 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
245 struct ttm_buffer_object *bo = &nvbo->bo; 342 struct ttm_buffer_object *bo = &nvbo->bo;
246 int ret; 343 int ret;
247 344
@@ -258,10 +355,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
258 if (ret == 0) { 355 if (ret == 0) {
259 switch (bo->mem.mem_type) { 356 switch (bo->mem.mem_type) {
260 case TTM_PL_VRAM: 357 case TTM_PL_VRAM:
261 dev_priv->fb_aper_free += bo->mem.size; 358 drm->gem.vram_available += bo->mem.size;
262 break; 359 break;
263 case TTM_PL_TT: 360 case TTM_PL_TT:
264 dev_priv->gart_info.aper_free += bo->mem.size; 361 drm->gem.gart_available += bo->mem.size;
265 break; 362 break;
266 default: 363 default:
267 break; 364 break;
@@ -356,30 +453,18 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
356} 453}
357 454
358static struct ttm_tt * 455static struct ttm_tt *
359nouveau_ttm_tt_create(struct ttm_bo_device *bdev, 456nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
360 unsigned long size, uint32_t page_flags, 457 uint32_t page_flags, struct page *dummy_read)
361 struct page *dummy_read_page)
362{ 458{
363 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 459 struct nouveau_drm *drm = nouveau_bdev(bdev);
364 struct drm_device *dev = dev_priv->dev; 460 struct drm_device *dev = drm->dev;
365 461
366 switch (dev_priv->gart_info.type) { 462 if (drm->agp.stat == ENABLED) {
367#if __OS_HAS_AGP 463 return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
368 case NOUVEAU_GART_AGP: 464 page_flags, dummy_read);
369 return ttm_agp_tt_create(bdev, dev->agp->bridge,
370 size, page_flags, dummy_read_page);
371#endif
372 case NOUVEAU_GART_PDMA:
373 case NOUVEAU_GART_HW:
374 return nouveau_sgdma_create_ttm(bdev, size, page_flags,
375 dummy_read_page);
376 default:
377 NV_ERROR(dev, "Unknown GART type %d\n",
378 dev_priv->gart_info.type);
379 break;
380 } 465 }
381 466
382 return NULL; 467 return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
383} 468}
384 469
385static int 470static int
@@ -393,8 +478,7 @@ static int
393nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, 478nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
394 struct ttm_mem_type_manager *man) 479 struct ttm_mem_type_manager *man)
395{ 480{
396 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 481 struct nouveau_drm *drm = nouveau_bdev(bdev);
397 struct drm_device *dev = dev_priv->dev;
398 482
399 switch (type) { 483 switch (type) {
400 case TTM_PL_SYSTEM: 484 case TTM_PL_SYSTEM:
@@ -403,7 +487,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
403 man->default_caching = TTM_PL_FLAG_CACHED; 487 man->default_caching = TTM_PL_FLAG_CACHED;
404 break; 488 break;
405 case TTM_PL_VRAM: 489 case TTM_PL_VRAM:
406 if (dev_priv->card_type >= NV_50) { 490 if (nv_device(drm->device)->card_type >= NV_50) {
407 man->func = &nouveau_vram_manager; 491 man->func = &nouveau_vram_manager;
408 man->io_reserve_fastpath = false; 492 man->io_reserve_fastpath = false;
409 man->use_io_reserve_lru = true; 493 man->use_io_reserve_lru = true;
@@ -417,32 +501,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
417 man->default_caching = TTM_PL_FLAG_WC; 501 man->default_caching = TTM_PL_FLAG_WC;
418 break; 502 break;
419 case TTM_PL_TT: 503 case TTM_PL_TT:
420 if (dev_priv->card_type >= NV_50) 504 if (nv_device(drm->device)->card_type >= NV_50)
421 man->func = &nouveau_gart_manager; 505 man->func = &nouveau_gart_manager;
422 else 506 else
507 if (drm->agp.stat != ENABLED)
508 man->func = &nv04_gart_manager;
509 else
423 man->func = &ttm_bo_manager_func; 510 man->func = &ttm_bo_manager_func;
424 switch (dev_priv->gart_info.type) { 511
425 case NOUVEAU_GART_AGP: 512 if (drm->agp.stat == ENABLED) {
426 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 513 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
427 man->available_caching = TTM_PL_FLAG_UNCACHED | 514 man->available_caching = TTM_PL_FLAG_UNCACHED |
428 TTM_PL_FLAG_WC; 515 TTM_PL_FLAG_WC;
429 man->default_caching = TTM_PL_FLAG_WC; 516 man->default_caching = TTM_PL_FLAG_WC;
430 break; 517 } else {
431 case NOUVEAU_GART_PDMA:
432 case NOUVEAU_GART_HW:
433 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 518 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
434 TTM_MEMTYPE_FLAG_CMA; 519 TTM_MEMTYPE_FLAG_CMA;
435 man->available_caching = TTM_PL_MASK_CACHING; 520 man->available_caching = TTM_PL_MASK_CACHING;
436 man->default_caching = TTM_PL_FLAG_CACHED; 521 man->default_caching = TTM_PL_FLAG_CACHED;
437 break;
438 default:
439 NV_ERROR(dev, "Unknown GART type: %d\n",
440 dev_priv->gart_info.type);
441 return -EINVAL;
442 } 522 }
523
443 break; 524 break;
444 default: 525 default:
445 NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
446 return -EINVAL; 526 return -EINVAL;
447 } 527 }
448 return 0; 528 return 0;
@@ -491,6 +571,18 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
491} 571}
492 572
493static int 573static int
574nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
575{
576 int ret = RING_SPACE(chan, 2);
577 if (ret == 0) {
578 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
579 OUT_RING (chan, handle);
580 FIRE_RING (chan);
581 }
582 return ret;
583}
584
585static int
494nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 586nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
495 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 587 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
496{ 588{
@@ -676,20 +768,14 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
676static int 768static int
677nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) 769nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
678{ 770{
679 int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000, 771 int ret = RING_SPACE(chan, 6);
680 &chan->m2mf_ntfy);
681 if (ret == 0) { 772 if (ret == 0) {
682 ret = RING_SPACE(chan, 6); 773 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
683 if (ret == 0) { 774 OUT_RING (chan, handle);
684 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); 775 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
685 OUT_RING (chan, handle); 776 OUT_RING (chan, NvNotify0);
686 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); 777 OUT_RING (chan, NvDmaFB);
687 OUT_RING (chan, NvNotify0); 778 OUT_RING (chan, NvDmaFB);
688 OUT_RING (chan, NvDmaFB);
689 OUT_RING (chan, NvDmaFB);
690 } else {
691 nouveau_ramht_remove(chan, NvNotify0);
692 }
693 } 779 }
694 780
695 return ret; 781 return ret;
@@ -788,16 +874,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
788static int 874static int
789nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) 875nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
790{ 876{
791 int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000, 877 int ret = RING_SPACE(chan, 4);
792 &chan->m2mf_ntfy);
793 if (ret == 0) { 878 if (ret == 0) {
794 ret = RING_SPACE(chan, 4); 879 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
795 if (ret == 0) { 880 OUT_RING (chan, handle);
796 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); 881 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
797 OUT_RING (chan, handle); 882 OUT_RING (chan, NvNotify0);
798 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
799 OUT_RING (chan, NvNotify0);
800 }
801 } 883 }
802 884
803 return ret; 885 return ret;
@@ -808,8 +890,8 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
808 struct nouveau_channel *chan, struct ttm_mem_reg *mem) 890 struct nouveau_channel *chan, struct ttm_mem_reg *mem)
809{ 891{
810 if (mem->mem_type == TTM_PL_TT) 892 if (mem->mem_type == TTM_PL_TT)
811 return chan->gart_handle; 893 return NvDmaTT;
812 return chan->vram_handle; 894 return NvDmaFB;
813} 895}
814 896
815static int 897static int
@@ -865,8 +947,9 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
865 struct nouveau_mem *node = mem->mm_node; 947 struct nouveau_mem *node = mem->mm_node;
866 int ret; 948 int ret;
867 949
868 ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT, 950 ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
869 node->page_shift, NV_MEM_ACCESS_RO, vma); 951 PAGE_SHIFT, node->page_shift,
952 NV_MEM_ACCESS_RW, vma);
870 if (ret) 953 if (ret)
871 return ret; 954 return ret;
872 955
@@ -883,19 +966,19 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
883 bool no_wait_reserve, bool no_wait_gpu, 966 bool no_wait_reserve, bool no_wait_gpu,
884 struct ttm_mem_reg *new_mem) 967 struct ttm_mem_reg *new_mem)
885{ 968{
886 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 969 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
887 struct nouveau_channel *chan = chan = dev_priv->channel; 970 struct nouveau_channel *chan = chan = drm->channel;
888 struct nouveau_bo *nvbo = nouveau_bo(bo); 971 struct nouveau_bo *nvbo = nouveau_bo(bo);
889 struct ttm_mem_reg *old_mem = &bo->mem; 972 struct ttm_mem_reg *old_mem = &bo->mem;
890 int ret; 973 int ret;
891 974
892 mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); 975 mutex_lock(&chan->cli->mutex);
893 976
894 /* create temporary vmas for the transfer and attach them to the 977 /* create temporary vmas for the transfer and attach them to the
895 * old nouveau_mem node, these will get cleaned up after ttm has 978 * old nouveau_mem node, these will get cleaned up after ttm has
896 * destroyed the ttm_mem_reg 979 * destroyed the ttm_mem_reg
897 */ 980 */
898 if (dev_priv->card_type >= NV_50) { 981 if (nv_device(drm->device)->card_type >= NV_50) {
899 struct nouveau_mem *node = old_mem->mm_node; 982 struct nouveau_mem *node = old_mem->mm_node;
900 983
901 ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]); 984 ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
@@ -907,7 +990,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
907 goto out; 990 goto out;
908 } 991 }
909 992
910 ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem); 993 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
911 if (ret == 0) { 994 if (ret == 0) {
912 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, 995 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
913 no_wait_reserve, 996 no_wait_reserve,
@@ -915,14 +998,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
915 } 998 }
916 999
917out: 1000out:
918 mutex_unlock(&chan->mutex); 1001 mutex_unlock(&chan->cli->mutex);
919 return ret; 1002 return ret;
920} 1003}
921 1004
922void 1005void
923nouveau_bo_move_init(struct nouveau_channel *chan) 1006nouveau_bo_move_init(struct nouveau_drm *drm)
924{ 1007{
925 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
926 static const struct { 1008 static const struct {
927 const char *name; 1009 const char *name;
928 int engine; 1010 int engine;
@@ -932,7 +1014,8 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
932 struct ttm_mem_reg *, struct ttm_mem_reg *); 1014 struct ttm_mem_reg *, struct ttm_mem_reg *);
933 int (*init)(struct nouveau_channel *, u32 handle); 1015 int (*init)(struct nouveau_channel *, u32 handle);
934 } _methods[] = { 1016 } _methods[] = {
935 { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1017 { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1018 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
936 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, 1019 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
937 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, 1020 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
938 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init }, 1021 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
@@ -947,19 +1030,34 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
947 int ret; 1030 int ret;
948 1031
949 do { 1032 do {
1033 struct nouveau_object *object;
1034 struct nouveau_channel *chan;
950 u32 handle = (mthd->engine << 16) | mthd->oclass; 1035 u32 handle = (mthd->engine << 16) | mthd->oclass;
951 ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass); 1036
1037 if (mthd->init == nve0_bo_move_init)
1038 chan = drm->cechan;
1039 else
1040 chan = drm->channel;
1041 if (chan == NULL)
1042 continue;
1043
1044 ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
1045 mthd->oclass, NULL, 0, &object);
952 if (ret == 0) { 1046 if (ret == 0) {
953 ret = mthd->init(chan, handle); 1047 ret = mthd->init(chan, handle);
954 if (ret == 0) { 1048 if (ret) {
955 dev_priv->ttm.move = mthd->exec; 1049 nouveau_object_del(nv_object(drm),
956 name = mthd->name; 1050 chan->handle, handle);
957 break; 1051 continue;
958 } 1052 }
1053
1054 drm->ttm.move = mthd->exec;
1055 name = mthd->name;
1056 break;
959 } 1057 }
960 } while ((++mthd)->exec); 1058 } while ((++mthd)->exec);
961 1059
962 NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name); 1060 NV_INFO(drm, "MM: using %s for buffer copies\n", name);
963} 1061}
964 1062
965static int 1063static int
@@ -1044,7 +1142,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1044 nouveau_vm_map(vma, new_mem->mm_node); 1142 nouveau_vm_map(vma, new_mem->mm_node);
1045 } else 1143 } else
1046 if (new_mem && new_mem->mem_type == TTM_PL_TT && 1144 if (new_mem && new_mem->mem_type == TTM_PL_TT &&
1047 nvbo->page_shift == vma->vm->spg_shift) { 1145 nvbo->page_shift == vma->vm->vmm->spg_shift) {
1048 if (((struct nouveau_mem *)new_mem->mm_node)->sg) 1146 if (((struct nouveau_mem *)new_mem->mm_node)->sg)
1049 nouveau_vm_map_sg_table(vma, 0, new_mem-> 1147 nouveau_vm_map_sg_table(vma, 0, new_mem->
1050 num_pages << PAGE_SHIFT, 1148 num_pages << PAGE_SHIFT,
@@ -1061,10 +1159,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1061 1159
1062static int 1160static int
1063nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, 1161nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1064 struct nouveau_tile_reg **new_tile) 1162 struct nouveau_drm_tile **new_tile)
1065{ 1163{
1066 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1164 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1067 struct drm_device *dev = dev_priv->dev; 1165 struct drm_device *dev = drm->dev;
1068 struct nouveau_bo *nvbo = nouveau_bo(bo); 1166 struct nouveau_bo *nvbo = nouveau_bo(bo);
1069 u64 offset = new_mem->start << PAGE_SHIFT; 1167 u64 offset = new_mem->start << PAGE_SHIFT;
1070 1168
@@ -1072,8 +1170,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1072 if (new_mem->mem_type != TTM_PL_VRAM) 1170 if (new_mem->mem_type != TTM_PL_VRAM)
1073 return 0; 1171 return 0;
1074 1172
1075 if (dev_priv->card_type >= NV_10) { 1173 if (nv_device(drm->device)->card_type >= NV_10) {
1076 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, 1174 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
1077 nvbo->tile_mode, 1175 nvbo->tile_mode,
1078 nvbo->tile_flags); 1176 nvbo->tile_flags);
1079 } 1177 }
@@ -1083,13 +1181,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1083 1181
1084static void 1182static void
1085nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, 1183nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1086 struct nouveau_tile_reg *new_tile, 1184 struct nouveau_drm_tile *new_tile,
1087 struct nouveau_tile_reg **old_tile) 1185 struct nouveau_drm_tile **old_tile)
1088{ 1186{
1089 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1187 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1090 struct drm_device *dev = dev_priv->dev; 1188 struct drm_device *dev = drm->dev;
1091 1189
1092 nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj); 1190 nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
1093 *old_tile = new_tile; 1191 *old_tile = new_tile;
1094} 1192}
1095 1193
@@ -1098,13 +1196,13 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1098 bool no_wait_reserve, bool no_wait_gpu, 1196 bool no_wait_reserve, bool no_wait_gpu,
1099 struct ttm_mem_reg *new_mem) 1197 struct ttm_mem_reg *new_mem)
1100{ 1198{
1101 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1199 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1102 struct nouveau_bo *nvbo = nouveau_bo(bo); 1200 struct nouveau_bo *nvbo = nouveau_bo(bo);
1103 struct ttm_mem_reg *old_mem = &bo->mem; 1201 struct ttm_mem_reg *old_mem = &bo->mem;
1104 struct nouveau_tile_reg *new_tile = NULL; 1202 struct nouveau_drm_tile *new_tile = NULL;
1105 int ret = 0; 1203 int ret = 0;
1106 1204
1107 if (dev_priv->card_type < NV_50) { 1205 if (nv_device(drm->device)->card_type < NV_50) {
1108 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); 1206 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1109 if (ret) 1207 if (ret)
1110 return ret; 1208 return ret;
@@ -1119,7 +1217,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1119 } 1217 }
1120 1218
1121 /* CPU copy if we have no accelerated method available */ 1219 /* CPU copy if we have no accelerated method available */
1122 if (!dev_priv->ttm.move) { 1220 if (!drm->ttm.move) {
1123 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1221 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
1124 goto out; 1222 goto out;
1125 } 1223 }
@@ -1139,7 +1237,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1139 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1237 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
1140 1238
1141out: 1239out:
1142 if (dev_priv->card_type < NV_50) { 1240 if (nv_device(drm->device)->card_type < NV_50) {
1143 if (ret) 1241 if (ret)
1144 nouveau_bo_vm_cleanup(bo, NULL, &new_tile); 1242 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1145 else 1243 else
@@ -1159,8 +1257,8 @@ static int
1159nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1257nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1160{ 1258{
1161 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 1259 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1162 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 1260 struct nouveau_drm *drm = nouveau_bdev(bdev);
1163 struct drm_device *dev = dev_priv->dev; 1261 struct drm_device *dev = drm->dev;
1164 int ret; 1262 int ret;
1165 1263
1166 mem->bus.addr = NULL; 1264 mem->bus.addr = NULL;
@@ -1176,48 +1274,28 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1176 return 0; 1274 return 0;
1177 case TTM_PL_TT: 1275 case TTM_PL_TT:
1178#if __OS_HAS_AGP 1276#if __OS_HAS_AGP
1179 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 1277 if (drm->agp.stat == ENABLED) {
1180 mem->bus.offset = mem->start << PAGE_SHIFT; 1278 mem->bus.offset = mem->start << PAGE_SHIFT;
1181 mem->bus.base = dev_priv->gart_info.aper_base; 1279 mem->bus.base = drm->agp.base;
1182 mem->bus.is_iomem = true; 1280 mem->bus.is_iomem = true;
1183 } 1281 }
1184#endif 1282#endif
1185 break; 1283 break;
1186 case TTM_PL_VRAM: 1284 case TTM_PL_VRAM:
1187 { 1285 mem->bus.offset = mem->start << PAGE_SHIFT;
1188 struct nouveau_mem *node = mem->mm_node; 1286 mem->bus.base = pci_resource_start(dev->pdev, 1);
1189 u8 page_shift; 1287 mem->bus.is_iomem = true;
1190 1288 if (nv_device(drm->device)->card_type >= NV_50) {
1191 if (!dev_priv->bar1_vm) { 1289 struct nouveau_bar *bar = nouveau_bar(drm->device);
1192 mem->bus.offset = mem->start << PAGE_SHIFT; 1290 struct nouveau_mem *node = mem->mm_node;
1193 mem->bus.base = pci_resource_start(dev->pdev, 1);
1194 mem->bus.is_iomem = true;
1195 break;
1196 }
1197
1198 if (dev_priv->card_type >= NV_C0)
1199 page_shift = node->page_shift;
1200 else
1201 page_shift = 12;
1202 1291
1203 ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 1292 ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
1204 page_shift, NV_MEM_ACCESS_RW, 1293 &node->bar_vma);
1205 &node->bar_vma); 1294 if (ret)
1206 if (ret) 1295 return ret;
1207 return ret;
1208 1296
1209 nouveau_vm_map(&node->bar_vma, node); 1297 mem->bus.offset = node->bar_vma.offset;
1210 if (ret) {
1211 nouveau_vm_put(&node->bar_vma);
1212 return ret;
1213 } 1298 }
1214
1215 mem->bus.offset = node->bar_vma.offset;
1216 if (dev_priv->card_type == NV_50) /*XXX*/
1217 mem->bus.offset -= 0x0020000000ULL;
1218 mem->bus.base = pci_resource_start(dev->pdev, 1);
1219 mem->bus.is_iomem = true;
1220 }
1221 break; 1299 break;
1222 default: 1300 default:
1223 return -EINVAL; 1301 return -EINVAL;
@@ -1228,41 +1306,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1228static void 1306static void
1229nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1307nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1230{ 1308{
1231 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 1309 struct nouveau_drm *drm = nouveau_bdev(bdev);
1310 struct nouveau_bar *bar = nouveau_bar(drm->device);
1232 struct nouveau_mem *node = mem->mm_node; 1311 struct nouveau_mem *node = mem->mm_node;
1233 1312
1234 if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
1235 return;
1236
1237 if (!node->bar_vma.node) 1313 if (!node->bar_vma.node)
1238 return; 1314 return;
1239 1315
1240 nouveau_vm_unmap(&node->bar_vma); 1316 bar->unmap(bar, &node->bar_vma);
1241 nouveau_vm_put(&node->bar_vma);
1242} 1317}
1243 1318
1244static int 1319static int
1245nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) 1320nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1246{ 1321{
1247 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1322 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1248 struct nouveau_bo *nvbo = nouveau_bo(bo); 1323 struct nouveau_bo *nvbo = nouveau_bo(bo);
1324 struct nouveau_device *device = nv_device(drm->device);
1325 u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
1249 1326
1250 /* as long as the bo isn't in vram, and isn't tiled, we've got 1327 /* as long as the bo isn't in vram, and isn't tiled, we've got
1251 * nothing to do here. 1328 * nothing to do here.
1252 */ 1329 */
1253 if (bo->mem.mem_type != TTM_PL_VRAM) { 1330 if (bo->mem.mem_type != TTM_PL_VRAM) {
1254 if (dev_priv->card_type < NV_50 || 1331 if (nv_device(drm->device)->card_type < NV_50 ||
1255 !nouveau_bo_tile_layout(nvbo)) 1332 !nouveau_bo_tile_layout(nvbo))
1256 return 0; 1333 return 0;
1257 } 1334 }
1258 1335
1259 /* make sure bo is in mappable vram */ 1336 /* make sure bo is in mappable vram */
1260 if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages) 1337 if (bo->mem.start + bo->mem.num_pages < mappable)
1261 return 0; 1338 return 0;
1262 1339
1263 1340
1264 nvbo->placement.fpfn = 0; 1341 nvbo->placement.fpfn = 0;
1265 nvbo->placement.lpfn = dev_priv->fb_mappable_pages; 1342 nvbo->placement.lpfn = mappable;
1266 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); 1343 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1267 return nouveau_bo_validate(nvbo, false, true, false); 1344 return nouveau_bo_validate(nvbo, false, true, false);
1268} 1345}
@@ -1271,7 +1348,7 @@ static int
1271nouveau_ttm_tt_populate(struct ttm_tt *ttm) 1348nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1272{ 1349{
1273 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1350 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1274 struct drm_nouveau_private *dev_priv; 1351 struct nouveau_drm *drm;
1275 struct drm_device *dev; 1352 struct drm_device *dev;
1276 unsigned i; 1353 unsigned i;
1277 int r; 1354 int r;
@@ -1288,11 +1365,11 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1288 return 0; 1365 return 0;
1289 } 1366 }
1290 1367
1291 dev_priv = nouveau_bdev(ttm->bdev); 1368 drm = nouveau_bdev(ttm->bdev);
1292 dev = dev_priv->dev; 1369 dev = drm->dev;
1293 1370
1294#if __OS_HAS_AGP 1371#if __OS_HAS_AGP
1295 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 1372 if (drm->agp.stat == ENABLED) {
1296 return ttm_agp_tt_populate(ttm); 1373 return ttm_agp_tt_populate(ttm);
1297 } 1374 }
1298#endif 1375#endif
@@ -1329,7 +1406,7 @@ static void
1329nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) 1406nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1330{ 1407{
1331 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1408 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1332 struct drm_nouveau_private *dev_priv; 1409 struct nouveau_drm *drm;
1333 struct drm_device *dev; 1410 struct drm_device *dev;
1334 unsigned i; 1411 unsigned i;
1335 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1412 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1337,11 +1414,11 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1337 if (slave) 1414 if (slave)
1338 return; 1415 return;
1339 1416
1340 dev_priv = nouveau_bdev(ttm->bdev); 1417 drm = nouveau_bdev(ttm->bdev);
1341 dev = dev_priv->dev; 1418 dev = drm->dev;
1342 1419
1343#if __OS_HAS_AGP 1420#if __OS_HAS_AGP
1344 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 1421 if (drm->agp.stat == ENABLED) {
1345 ttm_agp_tt_unpopulate(ttm); 1422 ttm_agp_tt_unpopulate(ttm);
1346 return; 1423 return;
1347 } 1424 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
new file mode 100644
index 000000000000..dec51b1098fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -0,0 +1,99 @@
1#ifndef __NOUVEAU_BO_H__
2#define __NOUVEAU_BO_H__
3
4struct nouveau_channel;
5struct nouveau_fence;
6struct nouveau_vma;
7
8struct nouveau_bo {
9 struct ttm_buffer_object bo;
10 struct ttm_placement placement;
11 u32 valid_domains;
12 u32 placements[3];
13 u32 busy_placements[3];
14 struct ttm_bo_kmap_obj kmap;
15 struct list_head head;
16
17 /* protected by ttm_bo_reserve() */
18 struct drm_file *reserved_by;
19 struct list_head entry;
20 int pbbo_index;
21 bool validate_mapped;
22
23 struct list_head vma_list;
24 unsigned page_shift;
25
26 u32 tile_mode;
27 u32 tile_flags;
28 struct nouveau_drm_tile *tile;
29
30 struct drm_gem_object *gem;
31 int pin_refcnt;
32
33 struct ttm_bo_kmap_obj dma_buf_vmap;
34 int vmapping_count;
35};
36
37static inline struct nouveau_bo *
38nouveau_bo(struct ttm_buffer_object *bo)
39{
40 return container_of(bo, struct nouveau_bo, bo);
41}
42
43static inline int
44nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
45{
46 struct nouveau_bo *prev;
47
48 if (!pnvbo)
49 return -EINVAL;
50 prev = *pnvbo;
51
52 *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
53 if (prev) {
54 struct ttm_buffer_object *bo = &prev->bo;
55
56 ttm_bo_unref(&bo);
57 }
58
59 return 0;
60}
61
62extern struct ttm_bo_driver nouveau_bo_driver;
63
64void nouveau_bo_move_init(struct nouveau_drm *);
65int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
66 u32 tile_mode, u32 tile_flags, struct sg_table *sg,
67 struct nouveau_bo **);
68int nouveau_bo_pin(struct nouveau_bo *, u32 flags);
69int nouveau_bo_unpin(struct nouveau_bo *);
70int nouveau_bo_map(struct nouveau_bo *);
71void nouveau_bo_unmap(struct nouveau_bo *);
72void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
73u16 nouveau_bo_rd16(struct nouveau_bo *, unsigned index);
74void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
75u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
76void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
77void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
78int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
79 bool no_wait_reserve, bool no_wait_gpu);
80
81struct nouveau_vma *
82nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
83
84int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
85 struct nouveau_vma *);
86void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
87
88/* TODO: submit equivalent to TTM generic API upstream? */
89static inline void __iomem *
90nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
91{
92 bool is_iomem;
93 void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
94 &nvbo->kmap, &is_iomem);
95 WARN_ON_ONCE(ioptr && !is_iomem);
96 return ioptr;
97}
98
99#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index dad96cce5e39..77959526b5f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -22,7 +22,8 @@
22 */ 22 */
23 23
24#include "drmP.h" 24#include "drmP.h"
25#include "nouveau_drv.h" 25#include "nouveau_drm.h"
26#include "nouveau_reg.h"
26#include "nouveau_hw.h" 27#include "nouveau_hw.h"
27 28
28/****************************************************************************\ 29/****************************************************************************\
@@ -195,12 +196,13 @@ static void
195nv04_update_arb(struct drm_device *dev, int VClk, int bpp, 196nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
196 int *burst, int *lwm) 197 int *burst, int *lwm)
197{ 198{
198 struct drm_nouveau_private *dev_priv = dev->dev_private; 199 struct nouveau_drm *drm = nouveau_drm(dev);
200 struct nouveau_device *device = nouveau_dev(dev);
199 struct nv_fifo_info fifo_data; 201 struct nv_fifo_info fifo_data;
200 struct nv_sim_state sim_data; 202 struct nv_sim_state sim_data;
201 int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); 203 int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
202 int NVClk = nouveau_hw_get_clock(dev, PLL_CORE); 204 int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
203 uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1); 205 uint32_t cfg1 = nv_rd32(device, NV04_PFB_CFG1);
204 206
205 sim_data.pclk_khz = VClk; 207 sim_data.pclk_khz = VClk;
206 sim_data.mclk_khz = MClk; 208 sim_data.mclk_khz = MClk;
@@ -218,13 +220,13 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
218 sim_data.mem_latency = 3; 220 sim_data.mem_latency = 3;
219 sim_data.mem_page_miss = 10; 221 sim_data.mem_page_miss = 10;
220 } else { 222 } else {
221 sim_data.memory_type = nvReadFB(dev, NV04_PFB_CFG0) & 0x1; 223 sim_data.memory_type = nv_rd32(device, NV04_PFB_CFG0) & 0x1;
222 sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64; 224 sim_data.memory_width = (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
223 sim_data.mem_latency = cfg1 & 0xf; 225 sim_data.mem_latency = cfg1 & 0xf;
224 sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); 226 sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
225 } 227 }
226 228
227 if (dev_priv->card_type == NV_04) 229 if (nv_device(drm->device)->card_type == NV_04)
228 nv04_calc_arb(&fifo_data, &sim_data); 230 nv04_calc_arb(&fifo_data, &sim_data);
229 else 231 else
230 nv10_calc_arb(&fifo_data, &sim_data); 232 nv10_calc_arb(&fifo_data, &sim_data);
@@ -249,9 +251,9 @@ nv20_update_arb(int *burst, int *lwm)
249void 251void
250nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm) 252nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
251{ 253{
252 struct drm_nouveau_private *dev_priv = dev->dev_private; 254 struct nouveau_drm *drm = nouveau_drm(dev);
253 255
254 if (dev_priv->card_type < NV_20) 256 if (nv_device(drm->device)->card_type < NV_20)
255 nv04_update_arb(dev, vclk, bpp, burst, lwm); 257 nv04_update_arb(dev, vclk, bpp, burst, lwm);
256 else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || 258 else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
257 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { 259 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
@@ -260,219 +262,3 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
260 } else 262 } else
261 nv20_update_arb(burst, lwm); 263 nv20_update_arb(burst, lwm);
262} 264}
263
264static int
265getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
266 struct nouveau_pll_vals *bestpv)
267{
268 /* Find M, N and P for a single stage PLL
269 *
270 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
271 * values, but we're too lazy to use those atm
272 *
273 * "clk" parameter in kHz
274 * returns calculated clock
275 */
276 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 int cv = dev_priv->vbios.chip_version;
278 int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
279 int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
280 int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
281 int minU = pll_lim->vco1.min_inputfreq;
282 int maxU = pll_lim->vco1.max_inputfreq;
283 int minP = pll_lim->max_p ? pll_lim->min_p : 0;
284 int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p;
285 int crystal = pll_lim->refclk;
286 int M, N, thisP, P;
287 int clkP, calcclk;
288 int delta, bestdelta = INT_MAX;
289 int bestclk = 0;
290
291 /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
292 /* possibly correlated with introduction of 27MHz crystal */
293 if (dev_priv->card_type < NV_50) {
294 if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
295 if (clk > 250000)
296 maxM = 6;
297 if (clk > 340000)
298 maxM = 2;
299 } else if (cv < 0x40) {
300 if (clk > 150000)
301 maxM = 6;
302 if (clk > 200000)
303 maxM = 4;
304 if (clk > 340000)
305 maxM = 2;
306 }
307 }
308
309 P = pll_lim->max_p ? maxP : (1 << maxP);
310 if ((clk * P) < minvco) {
311 minvco = clk * maxP;
312 maxvco = minvco * 2;
313 }
314
315 if (clk + clk/200 > maxvco) /* +0.5% */
316 maxvco = clk + clk/200;
317
318 /* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
319 for (thisP = minP; thisP <= maxP; thisP++) {
320 P = pll_lim->max_p ? thisP : (1 << thisP);
321 clkP = clk * P;
322
323 if (clkP < minvco)
324 continue;
325 if (clkP > maxvco)
326 return bestclk;
327
328 for (M = minM; M <= maxM; M++) {
329 if (crystal/M < minU)
330 return bestclk;
331 if (crystal/M > maxU)
332 continue;
333
334 /* add crystal/2 to round better */
335 N = (clkP * M + crystal/2) / crystal;
336
337 if (N < minN)
338 continue;
339 if (N > maxN)
340 break;
341
342 /* more rounding additions */
343 calcclk = ((N * crystal + P/2) / P + M/2) / M;
344 delta = abs(calcclk - clk);
345 /* we do an exhaustive search rather than terminating
346 * on an optimality condition...
347 */
348 if (delta < bestdelta) {
349 bestdelta = delta;
350 bestclk = calcclk;
351 bestpv->N1 = N;
352 bestpv->M1 = M;
353 bestpv->log2P = thisP;
354 if (delta == 0) /* except this one */
355 return bestclk;
356 }
357 }
358 }
359
360 return bestclk;
361}
362
363static int
364getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
365 struct nouveau_pll_vals *bestpv)
366{
367 /* Find M, N and P for a two stage PLL
368 *
369 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
370 * values, but we're too lazy to use those atm
371 *
372 * "clk" parameter in kHz
373 * returns calculated clock
374 */
375 struct drm_nouveau_private *dev_priv = dev->dev_private;
376 int chip_version = dev_priv->vbios.chip_version;
377 int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
378 int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
379 int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
380 int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq;
381 int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m;
382 int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n;
383 int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m;
384 int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n;
385 int maxlog2P = pll_lim->max_usable_log2p;
386 int crystal = pll_lim->refclk;
387 bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
388 int M1, N1, M2, N2, log2P;
389 int clkP, calcclk1, calcclk2, calcclkout;
390 int delta, bestdelta = INT_MAX;
391 int bestclk = 0;
392
393 int vco2 = (maxvco2 - maxvco2/200) / 2;
394 for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
395 ;
396 clkP = clk << log2P;
397
398 if (maxvco2 < clk + clk/200) /* +0.5% */
399 maxvco2 = clk + clk/200;
400
401 for (M1 = minM1; M1 <= maxM1; M1++) {
402 if (crystal/M1 < minU1)
403 return bestclk;
404 if (crystal/M1 > maxU1)
405 continue;
406
407 for (N1 = minN1; N1 <= maxN1; N1++) {
408 calcclk1 = crystal * N1 / M1;
409 if (calcclk1 < minvco1)
410 continue;
411 if (calcclk1 > maxvco1)
412 break;
413
414 for (M2 = minM2; M2 <= maxM2; M2++) {
415 if (calcclk1/M2 < minU2)
416 break;
417 if (calcclk1/M2 > maxU2)
418 continue;
419
420 /* add calcclk1/2 to round better */
421 N2 = (clkP * M2 + calcclk1/2) / calcclk1;
422 if (N2 < minN2)
423 continue;
424 if (N2 > maxN2)
425 break;
426
427 if (!fixedgain2) {
428 if (chip_version < 0x60)
429 if (N2/M2 < 4 || N2/M2 > 10)
430 continue;
431
432 calcclk2 = calcclk1 * N2 / M2;
433 if (calcclk2 < minvco2)
434 break;
435 if (calcclk2 > maxvco2)
436 continue;
437 } else
438 calcclk2 = calcclk1;
439
440 calcclkout = calcclk2 >> log2P;
441 delta = abs(calcclkout - clk);
442 /* we do an exhaustive search rather than terminating
443 * on an optimality condition...
444 */
445 if (delta < bestdelta) {
446 bestdelta = delta;
447 bestclk = calcclkout;
448 bestpv->N1 = N1;
449 bestpv->M1 = M1;
450 bestpv->N2 = N2;
451 bestpv->M2 = M2;
452 bestpv->log2P = log2P;
453 if (delta == 0) /* except this one */
454 return bestclk;
455 }
456 }
457 }
458 }
459
460 return bestclk;
461}
462
463int
464nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
465 struct nouveau_pll_vals *pv)
466{
467 int outclk;
468
469 if (!pll_lim->vco2.maxfreq)
470 outclk = getMNP_single(dev, pll_lim, clk, pv);
471 else
472 outclk = getMNP_double(dev, pll_lim, clk, pv);
473
474 if (!outclk)
475 NV_ERROR(dev, "Could not find a compatible set of PLL values\n");
476
477 return outclk;
478}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
new file mode 100644
index 000000000000..c1d7301c0e9c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -0,0 +1,400 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/client.h>
27#include <core/device.h>
28#include <core/class.h>
29
30#include <subdev/fb.h>
31#include <subdev/vm.h>
32#include <subdev/instmem.h>
33
34#include <engine/software.h>
35
36#include "nouveau_drm.h"
37#include "nouveau_dma.h"
38#include "nouveau_bo.h"
39#include "nouveau_chan.h"
40#include "nouveau_fence.h"
41#include "nouveau_abi16.h"
42
43MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
44static int nouveau_vram_pushbuf;
45module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
46
47int
48nouveau_channel_idle(struct nouveau_channel *chan)
49{
50 struct nouveau_cli *cli = chan->cli;
51 struct nouveau_fence *fence = NULL;
52 int ret;
53
54 ret = nouveau_fence_new(chan, &fence);
55 if (!ret) {
56 ret = nouveau_fence_wait(fence, false, false);
57 nouveau_fence_unref(&fence);
58 }
59
60 if (ret)
61 NV_ERROR(cli, "failed to idle channel 0x%08x\n", chan->handle);
62 return ret;
63}
64
65void
66nouveau_channel_del(struct nouveau_channel **pchan)
67{
68 struct nouveau_channel *chan = *pchan;
69 if (chan) {
70 struct nouveau_object *client = nv_object(chan->cli);
71 if (chan->fence) {
72 nouveau_channel_idle(chan);
73 nouveau_fence(chan->drm)->context_del(chan);
74 }
75 nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
76 nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
77 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
78 nouveau_bo_unmap(chan->push.buffer);
79 nouveau_bo_ref(NULL, &chan->push.buffer);
80 kfree(chan);
81 }
82 *pchan = NULL;
83}
84
85static int
86nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
87 u32 parent, u32 handle, u32 size,
88 struct nouveau_channel **pchan)
89{
90 struct nouveau_device *device = nv_device(drm->device);
91 struct nouveau_instmem *imem = nouveau_instmem(device);
92 struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
93 struct nouveau_fb *pfb = nouveau_fb(device);
94 struct nouveau_client *client = &cli->base;
95 struct nv_dma_class args = {};
96 struct nouveau_channel *chan;
97 struct nouveau_object *push;
98 u32 target;
99 int ret;
100
101 chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
102 if (!chan)
103 return -ENOMEM;
104
105 chan->cli = cli;
106 chan->drm = drm;
107 chan->handle = handle;
108
109 /* allocate memory for dma push buffer */
110 target = TTM_PL_FLAG_TT;
111 if (nouveau_vram_pushbuf)
112 target = TTM_PL_FLAG_VRAM;
113
114 ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
115 &chan->push.buffer);
116 if (ret == 0) {
117 ret = nouveau_bo_pin(chan->push.buffer, target);
118 if (ret == 0)
119 ret = nouveau_bo_map(chan->push.buffer);
120 }
121
122 if (ret) {
123 nouveau_channel_del(pchan);
124 return ret;
125 }
126
127 /* create dma object covering the *entire* memory space that the
128 * pushbuf lives in, this is because the GEM code requires that
129 * we be able to call out to other (indirect) push buffers
130 */
131 chan->push.vma.offset = chan->push.buffer->bo.offset;
132 chan->push.handle = NVDRM_PUSH | (handle & 0xffff);
133
134 if (device->card_type >= NV_50) {
135 ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
136 &chan->push.vma);
137 if (ret) {
138 nouveau_channel_del(pchan);
139 return ret;
140 }
141
142 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
143 args.start = 0;
144 args.limit = client->vm->vmm->limit - 1;
145 } else
146 if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
147 u64 limit = pfb->ram.size - imem->reserved - 1;
148 if (device->card_type == NV_04) {
149 /* nv04 vram pushbuf hack, retarget to its location in
150 * the framebuffer bar rather than direct vram access..
151 * nfi why this exists, it came from the -nv ddx.
152 */
153 args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
154 args.start = pci_resource_start(device->pdev, 1);
155 args.limit = args.start + limit;
156 } else {
157 args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
158 args.start = 0;
159 args.limit = limit;
160 }
161 } else {
162 if (chan->drm->agp.stat == ENABLED) {
163 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
164 args.start = chan->drm->agp.base;
165 args.limit = chan->drm->agp.base +
166 chan->drm->agp.size - 1;
167 } else {
168 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
169 args.start = 0;
170 args.limit = vmm->limit - 1;
171 }
172 }
173
174 ret = nouveau_object_new(nv_object(chan->cli), parent,
175 chan->push.handle, 0x0002,
176 &args, sizeof(args), &push);
177 if (ret) {
178 nouveau_channel_del(pchan);
179 return ret;
180 }
181
182 return 0;
183}
184
185static int
186nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
187 u32 parent, u32 handle, u32 engine,
188 struct nouveau_channel **pchan)
189{
190 static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS,
191 NVC0_CHANNEL_IND_CLASS,
192 NV84_CHANNEL_IND_CLASS,
193 NV50_CHANNEL_IND_CLASS,
194 0 };
195 const u16 *oclass = oclasses;
196 struct nve0_channel_ind_class args;
197 struct nouveau_channel *chan;
198 int ret;
199
200 /* allocate dma push buffer */
201 ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
202 *pchan = chan;
203 if (ret)
204 return ret;
205
206 /* create channel object */
207 args.pushbuf = chan->push.handle;
208 args.ioffset = 0x10000 + chan->push.vma.offset;
209 args.ilength = 0x02000;
210 args.engine = engine;
211
212 do {
213 ret = nouveau_object_new(nv_object(cli), parent, handle,
214 *oclass++, &args, sizeof(args),
215 &chan->object);
216 if (ret == 0)
217 return ret;
218 } while (*oclass);
219
220 nouveau_channel_del(pchan);
221 return ret;
222}
223
224static int
225nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
226 u32 parent, u32 handle, struct nouveau_channel **pchan)
227{
228 static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS,
229 NV17_CHANNEL_DMA_CLASS,
230 NV10_CHANNEL_DMA_CLASS,
231 NV03_CHANNEL_DMA_CLASS,
232 0 };
233 const u16 *oclass = oclasses;
234 struct nv03_channel_dma_class args;
235 struct nouveau_channel *chan;
236 int ret;
237
238 /* allocate dma push buffer */
239 ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
240 *pchan = chan;
241 if (ret)
242 return ret;
243
244 /* create channel object */
245 args.pushbuf = chan->push.handle;
246 args.offset = chan->push.vma.offset;
247
248 do {
249 ret = nouveau_object_new(nv_object(cli), parent, handle,
250 *oclass++, &args, sizeof(args),
251 &chan->object);
252 if (ret == 0)
253 return ret;
254 } while (ret && *oclass);
255
256 nouveau_channel_del(pchan);
257 return ret;
258}
259
/*
 * nouveau_channel_init() - finish bringing up a freshly-created channel.
 * @chan: channel returned by nouveau_channel_ind()/nouveau_channel_dma()
 * @vram: object handle to create for the VRAM DMA object
 * @gart: object handle to create for the GART/AGP DMA object
 *
 * Creates the vram/gart DMA objects (pre-NVC0 only), sets up the
 * CPU-side DMA tracking state, emits the initial NOP skips, binds the
 * software object class (fences on old chips, flip completion), and
 * creates the per-channel fence context.  Returns 0 or negative errno.
 */
260static int
261nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
262{
263	struct nouveau_client *client = nv_client(chan->cli);
264	struct nouveau_device *device = nv_device(chan->drm->device);
265	struct nouveau_instmem *imem = nouveau_instmem(device);
266	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
267	struct nouveau_fb *pfb = nouveau_fb(device);
268	struct nouveau_software_chan *swch;
269	struct nouveau_object *object;
	/* NOTE(review): args is not zero-initialised here (prep() uses = {});
	 * flags/start/limit are assigned on every path below, but any other
	 * members of nv_dma_class would be passed as stack garbage — confirm */
270	struct nv_dma_class args;
271	int ret, i;
272
273	/* allocate dma objects to cover all allowed vram, and gart */
274	if (device->card_type < NV_C0) {
275		if (device->card_type >= NV_50) {
276			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
277			args.start = 0;
278			args.limit = client->vm->vmm->limit - 1;
279		} else {
280			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
281			args.start = 0;
282			args.limit = pfb->ram.size - imem->reserved - 1;
283		}
284
	/* 0x003d: DMA object class (presumably DMA_IN_MEMORY — confirm) */
285		ret = nouveau_object_new(nv_object(client), chan->handle, vram,
286					 0x003d, &args, sizeof(args), &object);
287		if (ret)
288			return ret;
289
290		if (device->card_type >= NV_50) {
291			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
292			args.start = 0;
293			args.limit = client->vm->vmm->limit - 1;
294		} else
295		if (chan->drm->agp.stat == ENABLED) {
296			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
297			args.start = chan->drm->agp.base;
298			args.limit = chan->drm->agp.base +
299				     chan->drm->agp.size - 1;
300		} else {
301			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
302			args.start = 0;
303			args.limit = vmm->limit - 1;
304		}
305
306		ret = nouveau_object_new(nv_object(client), chan->handle, gart,
307					 0x003d, &args, sizeof(args), &object);
308		if (ret)
309			return ret;
310
311		chan->vram = vram;
312		chan->gart = gart;
313	}
314
315	/* initialise dma tracking parameters */
316	switch (nv_hclass(chan->object) & 0x00ff) {
	/* 0x6b/0x6e: older DMA-mode channel classes — no IB ring, the whole
	 * 0x10000-byte buffer is pushbuf (minus 2 reserved words) */
317	case 0x006b:
318	case 0x006e:
319		chan->user_put = 0x40;
320		chan->user_get = 0x44;
321		chan->dma.max = (0x10000 / 4) - 2;
322		break;
323	default:
	/* IB-mode: pushbuf occupies the first 0x10000 bytes, the IB ring
	 * (0x2000 bytes, 8 bytes per entry) sits behind it */
324		chan->user_put = 0x40;
325		chan->user_get = 0x44;
326		chan->user_get_hi = 0x60;
327		chan->dma.ib_base = 0x10000 / 4;
328		chan->dma.ib_max = (0x02000 / 8) - 1;
329		chan->dma.ib_put = 0;
330		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
331		chan->dma.max = chan->dma.ib_base;
332		break;
333	}
334
335	chan->dma.put = 0;
336	chan->dma.cur = chan->dma.put;
337	chan->dma.free = chan->dma.max - chan->dma.cur;
338
	/* pad the start of the pushbuf with NOPs */
339	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
340	if (ret)
341		return ret;
342
343	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
344		OUT_RING(chan, 0x00000000);
345
346	/* allocate software object class (used for fences on <= nv05, and
347	 * to signal flip completion), bind it to a subchannel.
348	 */
349	if (chan != chan->drm->cechan) {
350		ret = nouveau_object_new(nv_object(client), chan->handle,
351					 NvSw, nouveau_abi16_swclass(chan->drm),
352					 NULL, 0, &object);
353		if (ret)
354			return ret;
355
356		swch = (void *)object->parent;
357		swch->flip = nouveau_flip_complete;
358		swch->flip_data = chan;
359	}
360
361	if (device->card_type < NV_C0) {
362		ret = RING_SPACE(chan, 2);
363		if (ret)
364			return ret;
365
	/* bind the software object to subchannel NvSubSw (method 0x0000) */
366		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
367		OUT_RING (chan, NvSw);
368		FIRE_RING (chan);
369	}
370
371	/* initialise synchronisation */
372	return nouveau_fence(chan->drm)->context_new(chan);
373}
374
375int
376nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
377 u32 parent, u32 handle, u32 arg0, u32 arg1,
378 struct nouveau_channel **pchan)
379{
380 int ret;
381
382 ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan);
383 if (ret) {
384 NV_DEBUG(cli, "ib channel create, %d\n", ret);
385 ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
386 if (ret) {
387 NV_DEBUG(cli, "dma channel create, %d\n", ret);
388 return ret;
389 }
390 }
391
392 ret = nouveau_channel_init(*pchan, arg0, arg1);
393 if (ret) {
394 NV_ERROR(cli, "channel failed to initialise, %d\n", ret);
395 nouveau_channel_del(pchan);
396 return ret;
397 }
398
399 return 0;
400}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
new file mode 100644
index 000000000000..40f97e2c47b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -0,0 +1,47 @@
1#ifndef __NOUVEAU_CHAN_H__
2#define __NOUVEAU_CHAN_H__
3
4struct nouveau_cli;
5
/* DRM-side state for a single GPU fifo channel.  Created by
 * nouveau_channel_new(), destroyed by nouveau_channel_del(). */
6struct nouveau_channel {
7	struct nouveau_cli *cli;	/* owning client */
8	struct nouveau_drm *drm;
9
10	u32 handle;	/* object handle of the channel itself */
11	u32 vram;	/* VRAM DMA object handle (set pre-NVC0 only) */
12	u32 gart;	/* GART/AGP DMA object handle (set pre-NVC0 only) */
13
14	struct {
15		struct nouveau_bo *buffer;	/* DMA push buffer BO (pinned+mapped) */
16		struct nouveau_vma vma;	/* its GPU VM mapping (NV50+) */
17		u32 handle;	/* DMA object covering the pushbuf's memory space */
18	} push;
19
20	/* TODO: this will be reworked in the near future */
21	bool accel_done;
22	void *fence;	/* per-channel fence context, NULL until created */
23	struct {
24		int max;	/* last usable pushbuf slot, in 32-bit words */
25		int free;	/* words free between cur and max */
26		int cur;	/* current write position (words) */
27		int put;	/* position of last submitted PUT (words) */
28		int ib_base;	/* start of IB ring in words (IB-mode only) */
29		int ib_max;	/* number of IB entries - 1 */
30		int ib_free;	/* IB entries still free */
31		int ib_put;	/* IB write index */
32	} dma;
33	u32 user_get_hi;	/* user-area register offsets (get_hi is NV50+) */
34	u32 user_get;
35	u32 user_put;
36
37	struct nouveau_object *object;	/* the core channel object */
38};
39
40
41int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *,
42			u32 parent, u32 handle, u32 arg0, u32 arg1,
43			struct nouveau_channel **);
44void nouveau_channel_del(struct nouveau_channel **);
45int nouveau_channel_idle(struct nouveau_channel *);
46
47#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
deleted file mode 100644
index debd90225a88..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ /dev/null
@@ -1,397 +0,0 @@
1/*
2 * Copyright 2005-2006 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drv.h"
28#include "nouveau_drm.h"
29#include "nouveau_dma.h"
30#include "nouveau_fifo.h"
31#include "nouveau_ramht.h"
32#include "nouveau_fence.h"
33#include "nouveau_software.h"
34
35static int
36nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
37{
38 u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
39 struct drm_device *dev = chan->dev;
40 struct drm_nouveau_private *dev_priv = dev->dev_private;
41 int ret;
42
43 /* allocate buffer object */
44 ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
45 if (ret)
46 goto out;
47
48 ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
49 if (ret)
50 goto out;
51
52 ret = nouveau_bo_map(chan->pushbuf_bo);
53 if (ret)
54 goto out;
55
56 /* create DMA object covering the entire memtype where the push
57 * buffer resides, userspace can submit its own push buffers from
58 * anywhere within the same memtype.
59 */
60 chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
61 if (dev_priv->card_type >= NV_50) {
62 ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
63 &chan->pushbuf_vma);
64 if (ret)
65 goto out;
66
67 if (dev_priv->card_type < NV_C0) {
68 ret = nouveau_gpuobj_dma_new(chan,
69 NV_CLASS_DMA_IN_MEMORY, 0,
70 (1ULL << 40),
71 NV_MEM_ACCESS_RO,
72 NV_MEM_TARGET_VM,
73 &chan->pushbuf);
74 }
75 chan->pushbuf_base = chan->pushbuf_vma.offset;
76 } else
77 if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
78 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
79 dev_priv->gart_info.aper_size,
80 NV_MEM_ACCESS_RO,
81 NV_MEM_TARGET_GART,
82 &chan->pushbuf);
83 } else
84 if (dev_priv->card_type != NV_04) {
85 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
86 dev_priv->fb_available_size,
87 NV_MEM_ACCESS_RO,
88 NV_MEM_TARGET_VRAM,
89 &chan->pushbuf);
90 } else {
91 /* NV04 cmdbuf hack, from original ddx.. not sure of it's
92 * exact reason for existing :) PCI access to cmdbuf in
93 * VRAM.
94 */
95 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
96 pci_resource_start(dev->pdev, 1),
97 dev_priv->fb_available_size,
98 NV_MEM_ACCESS_RO,
99 NV_MEM_TARGET_PCI,
100 &chan->pushbuf);
101 }
102
103out:
104 if (ret) {
105 NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
106 nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
107 nouveau_gpuobj_ref(NULL, &chan->pushbuf);
108 if (chan->pushbuf_bo) {
109 nouveau_bo_unmap(chan->pushbuf_bo);
110 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
111 }
112 }
113
114 return 0;
115}
116
117/* allocates and initializes a fifo for user space consumption */
118int
119nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
120 struct drm_file *file_priv,
121 uint32_t vram_handle, uint32_t gart_handle)
122{
123 struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
124 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
125 struct drm_nouveau_private *dev_priv = dev->dev_private;
126 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
127 struct nouveau_channel *chan;
128 unsigned long flags;
129 int ret, i;
130
131 /* allocate and lock channel structure */
132 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
133 if (!chan)
134 return -ENOMEM;
135 chan->dev = dev;
136 chan->file_priv = file_priv;
137 chan->vram_handle = vram_handle;
138 chan->gart_handle = gart_handle;
139
140 kref_init(&chan->ref);
141 atomic_set(&chan->users, 1);
142 mutex_init(&chan->mutex);
143 mutex_lock(&chan->mutex);
144
145 /* allocate hw channel id */
146 spin_lock_irqsave(&dev_priv->channels.lock, flags);
147 for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
148 if (!dev_priv->channels.ptr[chan->id]) {
149 nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
150 break;
151 }
152 }
153 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
154
155 if (chan->id == pfifo->channels) {
156 mutex_unlock(&chan->mutex);
157 kfree(chan);
158 return -ENODEV;
159 }
160
161 NV_DEBUG(dev, "initialising channel %d\n", chan->id);
162
163 /* setup channel's memory and vm */
164 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
165 if (ret) {
166 NV_ERROR(dev, "gpuobj %d\n", ret);
167 nouveau_channel_put(&chan);
168 return ret;
169 }
170
171 /* Allocate space for per-channel fixed notifier memory */
172 ret = nouveau_notifier_init_channel(chan);
173 if (ret) {
174 NV_ERROR(dev, "ntfy %d\n", ret);
175 nouveau_channel_put(&chan);
176 return ret;
177 }
178
179 /* Allocate DMA push buffer */
180 ret = nouveau_channel_pushbuf_init(chan);
181 if (ret) {
182 NV_ERROR(dev, "pushbuf %d\n", ret);
183 nouveau_channel_put(&chan);
184 return ret;
185 }
186
187 nouveau_dma_init(chan);
188 chan->user_put = 0x40;
189 chan->user_get = 0x44;
190 if (dev_priv->card_type >= NV_50)
191 chan->user_get_hi = 0x60;
192
193 /* create fifo context */
194 ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
195 if (ret) {
196 nouveau_channel_put(&chan);
197 return ret;
198 }
199
200 /* Insert NOPs for NOUVEAU_DMA_SKIPS */
201 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
202 if (ret) {
203 nouveau_channel_put(&chan);
204 return ret;
205 }
206
207 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
208 OUT_RING (chan, 0x00000000);
209
210 ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
211 if (ret) {
212 nouveau_channel_put(&chan);
213 return ret;
214 }
215
216 if (dev_priv->card_type < NV_C0) {
217 ret = RING_SPACE(chan, 2);
218 if (ret) {
219 nouveau_channel_put(&chan);
220 return ret;
221 }
222
223 BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
224 OUT_RING (chan, NvSw);
225 FIRE_RING (chan);
226 }
227
228 FIRE_RING(chan);
229
230 ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
231 if (ret) {
232 nouveau_channel_put(&chan);
233 return ret;
234 }
235
236 nouveau_debugfs_channel_init(chan);
237
238 NV_DEBUG(dev, "channel %d initialised\n", chan->id);
239 if (fpriv) {
240 spin_lock(&fpriv->lock);
241 list_add(&chan->list, &fpriv->channels);
242 spin_unlock(&fpriv->lock);
243 }
244 *chan_ret = chan;
245 return 0;
246}
247
248struct nouveau_channel *
249nouveau_channel_get_unlocked(struct nouveau_channel *ref)
250{
251 struct nouveau_channel *chan = NULL;
252
253 if (likely(ref && atomic_inc_not_zero(&ref->users)))
254 nouveau_channel_ref(ref, &chan);
255
256 return chan;
257}
258
259struct nouveau_channel *
260nouveau_channel_get(struct drm_file *file_priv, int id)
261{
262 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
263 struct nouveau_channel *chan;
264
265 spin_lock(&fpriv->lock);
266 list_for_each_entry(chan, &fpriv->channels, list) {
267 if (chan->id == id) {
268 chan = nouveau_channel_get_unlocked(chan);
269 spin_unlock(&fpriv->lock);
270 mutex_lock(&chan->mutex);
271 return chan;
272 }
273 }
274 spin_unlock(&fpriv->lock);
275
276 return ERR_PTR(-EINVAL);
277}
278
/*
 * nouveau_channel_put_unlocked() - drop a user reference on *pchan.
 * The last user reference triggers the full teardown sequence: idle the
 * channel, destroy engine contexts, unpublish it from the global table,
 * then release the resources it owned.  *pchan is NULLed either way.
 */
279void
280nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
281{
282	struct nouveau_channel *chan = *pchan;
283	struct drm_device *dev = chan->dev;
284	struct drm_nouveau_private *dev_priv = dev->dev_private;
285	unsigned long flags;
286	int i;
287
288	/* decrement the refcount, and we're done if there's still refs */
289	if (likely(!atomic_dec_and_test(&chan->users))) {
290		nouveau_channel_ref(NULL, pchan);
291		return;
292	}
293
294	/* no one wants the channel anymore */
295	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
296	nouveau_debugfs_channel_fini(chan);
297
298	/* give it chance to idle */
299	nouveau_channel_idle(chan);
300
	/* tear engines down in reverse registration order */
301	/* destroy the engine specific contexts */
302	for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
303		if (chan->engctx[i])
304			dev_priv->eng[i]->context_del(chan, i);
305	}
306
307	/* aside from its resources, the channel should now be dead,
308	 * remove it from the channel list
309	 */
310	spin_lock_irqsave(&dev_priv->channels.lock, flags);
311	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
312	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
313
314	/* destroy any resources the channel owned */
315	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
316	if (chan->pushbuf_bo) {
317		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
318		nouveau_bo_unmap(chan->pushbuf_bo);
319		nouveau_bo_unpin(chan->pushbuf_bo);
320		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
321	}
322	nouveau_ramht_ref(NULL, &chan->ramht, chan);
323	nouveau_notifier_takedown_channel(chan);
324	nouveau_gpuobj_channel_takedown(chan);
325
	/* drop the caller's kref; the struct is freed once all krefs die */
326	nouveau_channel_ref(NULL, pchan);
327}
328
329void
330nouveau_channel_put(struct nouveau_channel **pchan)
331{
332 mutex_unlock(&(*pchan)->mutex);
333 nouveau_channel_put_unlocked(pchan);
334}
335
336static void
337nouveau_channel_del(struct kref *ref)
338{
339 struct nouveau_channel *chan =
340 container_of(ref, struct nouveau_channel, ref);
341
342 kfree(chan);
343}
344
345void
346nouveau_channel_ref(struct nouveau_channel *chan,
347 struct nouveau_channel **pchan)
348{
349 if (chan)
350 kref_get(&chan->ref);
351
352 if (*pchan)
353 kref_put(&(*pchan)->ref, nouveau_channel_del);
354
355 *pchan = chan;
356}
357
358int
359nouveau_channel_idle(struct nouveau_channel *chan)
360{
361 struct drm_device *dev = chan->dev;
362 struct nouveau_fence *fence = NULL;
363 int ret;
364
365 ret = nouveau_fence_new(chan, &fence);
366 if (!ret) {
367 ret = nouveau_fence_wait(fence, false, false);
368 nouveau_fence_unref(&fence);
369 }
370
371 if (ret)
372 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
373 return ret;
374}
375
376/* cleans up all the fifos from file_priv */
377void
378nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
379{
380 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
381 struct nouveau_channel *chan;
382 int i;
383
384 if (!pfifo)
385 return;
386
387 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
388 for (i = 0; i < pfifo->channels; i++) {
389 chan = nouveau_channel_get(file_priv, i);
390 if (IS_ERR(chan))
391 continue;
392
393 list_del(&chan->list);
394 atomic_dec(&chan->users);
395 nouveau_channel_put(&chan);
396 }
397}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7b11edb077d0..702e2a74d2d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -31,12 +31,29 @@
31#include "drm_crtc_helper.h" 31#include "drm_crtc_helper.h"
32 32
33#include "nouveau_reg.h" 33#include "nouveau_reg.h"
34#include "nouveau_drv.h" 34#include "nouveau_drm.h"
35#include "nouveau_hw.h"
36#include "nouveau_acpi.h"
37
38#include "nouveau_display.h"
39#include "nouveau_connector.h"
35#include "nouveau_encoder.h" 40#include "nouveau_encoder.h"
36#include "nouveau_crtc.h" 41#include "nouveau_crtc.h"
37#include "nouveau_connector.h" 42
38#include "nouveau_gpio.h" 43#include <subdev/i2c.h>
39#include "nouveau_hw.h" 44#include <subdev/gpio.h>
45
46MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
47static int nouveau_tv_disable = 0;
48module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
49
50MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
51static int nouveau_ignorelid = 0;
52module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
53
54MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
55static int nouveau_duallink = 1;
56module_param_named(duallink, nouveau_duallink, int, 0400);
40 57
41static void nouveau_connector_hotplug(void *, int); 58static void nouveau_connector_hotplug(void *, int);
42 59
@@ -58,7 +75,7 @@ find_encoder(struct drm_connector *connector, int type)
58 continue; 75 continue;
59 nv_encoder = nouveau_encoder(obj_to_encoder(obj)); 76 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
60 77
61 if (type == OUTPUT_ANY || nv_encoder->dcb->type == type) 78 if (type == DCB_OUTPUT_ANY || nv_encoder->dcb->type == type)
62 return nv_encoder; 79 return nv_encoder;
63 } 80 }
64 81
@@ -83,19 +100,21 @@ static void
83nouveau_connector_destroy(struct drm_connector *connector) 100nouveau_connector_destroy(struct drm_connector *connector)
84{ 101{
85 struct nouveau_connector *nv_connector = nouveau_connector(connector); 102 struct nouveau_connector *nv_connector = nouveau_connector(connector);
86 struct drm_nouveau_private *dev_priv; 103 struct nouveau_gpio *gpio;
104 struct nouveau_drm *drm;
87 struct drm_device *dev; 105 struct drm_device *dev;
88 106
89 if (!nv_connector) 107 if (!nv_connector)
90 return; 108 return;
91 109
92 dev = nv_connector->base.dev; 110 dev = nv_connector->base.dev;
93 dev_priv = dev->dev_private; 111 drm = nouveau_drm(dev);
94 NV_DEBUG_KMS(dev, "\n"); 112 gpio = nouveau_gpio(drm->device);
113 NV_DEBUG(drm, "\n");
95 114
96 if (nv_connector->hpd != DCB_GPIO_UNUSED) { 115 if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
97 nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff, 116 gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
98 nouveau_connector_hotplug, connector); 117 nouveau_connector_hotplug, connector);
99 } 118 }
100 119
101 kfree(nv_connector->edid); 120 kfree(nv_connector->edid);
@@ -104,15 +123,17 @@ nouveau_connector_destroy(struct drm_connector *connector)
104 kfree(connector); 123 kfree(connector);
105} 124}
106 125
107static struct nouveau_i2c_chan * 126static struct nouveau_i2c_port *
108nouveau_connector_ddc_detect(struct drm_connector *connector, 127nouveau_connector_ddc_detect(struct drm_connector *connector,
109 struct nouveau_encoder **pnv_encoder) 128 struct nouveau_encoder **pnv_encoder)
110{ 129{
111 struct drm_device *dev = connector->dev; 130 struct drm_device *dev = connector->dev;
131 struct nouveau_drm *drm = nouveau_drm(dev);
132 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
112 int i; 133 int i;
113 134
114 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 135 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
115 struct nouveau_i2c_chan *i2c = NULL; 136 struct nouveau_i2c_port *port = NULL;
116 struct nouveau_encoder *nv_encoder; 137 struct nouveau_encoder *nv_encoder;
117 struct drm_mode_object *obj; 138 struct drm_mode_object *obj;
118 int id; 139 int id;
@@ -127,11 +148,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
127 nv_encoder = nouveau_encoder(obj_to_encoder(obj)); 148 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
128 149
129 if (nv_encoder->dcb->i2c_index < 0xf) 150 if (nv_encoder->dcb->i2c_index < 0xf)
130 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); 151 port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
131 152 if (port && nv_probe_i2c(port, 0x50)) {
132 if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
133 *pnv_encoder = nv_encoder; 153 *pnv_encoder = nv_encoder;
134 return i2c; 154 return port;
135 } 155 }
136 } 156 }
137 157
@@ -148,8 +168,8 @@ nouveau_connector_of_detect(struct drm_connector *connector)
148 struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev); 168 struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
149 169
150 if (!dn || 170 if (!dn ||
151 !((nv_encoder = find_encoder(connector, OUTPUT_TMDS)) || 171 !((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) ||
152 (nv_encoder = find_encoder(connector, OUTPUT_ANALOG)))) 172 (nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG))))
153 return NULL; 173 return NULL;
154 174
155 for_each_child_of_node(dn, cn) { 175 for_each_child_of_node(dn, cn) {
@@ -173,25 +193,25 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
173 struct nouveau_encoder *nv_encoder) 193 struct nouveau_encoder *nv_encoder)
174{ 194{
175 struct nouveau_connector *nv_connector = nouveau_connector(connector); 195 struct nouveau_connector *nv_connector = nouveau_connector(connector);
176 struct drm_nouveau_private *dev_priv = connector->dev->dev_private; 196 struct nouveau_drm *drm = nouveau_drm(connector->dev);
177 struct drm_device *dev = connector->dev; 197 struct drm_device *dev = connector->dev;
178 198
179 if (nv_connector->detected_encoder == nv_encoder) 199 if (nv_connector->detected_encoder == nv_encoder)
180 return; 200 return;
181 nv_connector->detected_encoder = nv_encoder; 201 nv_connector->detected_encoder = nv_encoder;
182 202
183 if (dev_priv->card_type >= NV_50) { 203 if (nv_device(drm->device)->card_type >= NV_50) {
184 connector->interlace_allowed = true; 204 connector->interlace_allowed = true;
185 connector->doublescan_allowed = true; 205 connector->doublescan_allowed = true;
186 } else 206 } else
187 if (nv_encoder->dcb->type == OUTPUT_LVDS || 207 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
188 nv_encoder->dcb->type == OUTPUT_TMDS) { 208 nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
189 connector->doublescan_allowed = false; 209 connector->doublescan_allowed = false;
190 connector->interlace_allowed = false; 210 connector->interlace_allowed = false;
191 } else { 211 } else {
192 connector->doublescan_allowed = true; 212 connector->doublescan_allowed = true;
193 if (dev_priv->card_type == NV_20 || 213 if (nv_device(drm->device)->card_type == NV_20 ||
194 (dev_priv->card_type == NV_10 && 214 (nv_device(drm->device)->card_type == NV_10 &&
195 (dev->pci_device & 0x0ff0) != 0x0100 && 215 (dev->pci_device & 0x0ff0) != 0x0100 &&
196 (dev->pci_device & 0x0ff0) != 0x0150)) 216 (dev->pci_device & 0x0ff0) != 0x0150))
197 /* HW is broken */ 217 /* HW is broken */
@@ -203,7 +223,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
203 if (nv_connector->type == DCB_CONNECTOR_DVI_I) { 223 if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
204 drm_connector_property_set_value(connector, 224 drm_connector_property_set_value(connector,
205 dev->mode_config.dvi_i_subconnector_property, 225 dev->mode_config.dvi_i_subconnector_property,
206 nv_encoder->dcb->type == OUTPUT_TMDS ? 226 nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
207 DRM_MODE_SUBCONNECTOR_DVID : 227 DRM_MODE_SUBCONNECTOR_DVID :
208 DRM_MODE_SUBCONNECTOR_DVIA); 228 DRM_MODE_SUBCONNECTOR_DVIA);
209 } 229 }
@@ -213,10 +233,11 @@ static enum drm_connector_status
213nouveau_connector_detect(struct drm_connector *connector, bool force) 233nouveau_connector_detect(struct drm_connector *connector, bool force)
214{ 234{
215 struct drm_device *dev = connector->dev; 235 struct drm_device *dev = connector->dev;
236 struct nouveau_drm *drm = nouveau_drm(dev);
216 struct nouveau_connector *nv_connector = nouveau_connector(connector); 237 struct nouveau_connector *nv_connector = nouveau_connector(connector);
217 struct nouveau_encoder *nv_encoder = NULL; 238 struct nouveau_encoder *nv_encoder = NULL;
218 struct nouveau_encoder *nv_partner; 239 struct nouveau_encoder *nv_partner;
219 struct nouveau_i2c_chan *i2c; 240 struct nouveau_i2c_port *i2c;
220 int type; 241 int type;
221 242
222 /* Cleanup the previous EDID block. */ 243 /* Cleanup the previous EDID block. */
@@ -232,14 +253,14 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
232 drm_mode_connector_update_edid_property(connector, 253 drm_mode_connector_update_edid_property(connector,
233 nv_connector->edid); 254 nv_connector->edid);
234 if (!nv_connector->edid) { 255 if (!nv_connector->edid) {
235 NV_ERROR(dev, "DDC responded, but no EDID for %s\n", 256 NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
236 drm_get_connector_name(connector)); 257 drm_get_connector_name(connector));
237 goto detect_analog; 258 goto detect_analog;
238 } 259 }
239 260
240 if (nv_encoder->dcb->type == OUTPUT_DP && 261 if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
241 !nouveau_dp_detect(to_drm_encoder(nv_encoder))) { 262 !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
242 NV_ERROR(dev, "Detected %s, but failed init\n", 263 NV_ERROR(drm, "Detected %s, but failed init\n",
243 drm_get_connector_name(connector)); 264 drm_get_connector_name(connector));
244 return connector_status_disconnected; 265 return connector_status_disconnected;
245 } 266 }
@@ -250,19 +271,19 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
250 * isn't necessarily correct. 271 * isn't necessarily correct.
251 */ 272 */
252 nv_partner = NULL; 273 nv_partner = NULL;
253 if (nv_encoder->dcb->type == OUTPUT_TMDS) 274 if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
254 nv_partner = find_encoder(connector, OUTPUT_ANALOG); 275 nv_partner = find_encoder(connector, DCB_OUTPUT_ANALOG);
255 if (nv_encoder->dcb->type == OUTPUT_ANALOG) 276 if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
256 nv_partner = find_encoder(connector, OUTPUT_TMDS); 277 nv_partner = find_encoder(connector, DCB_OUTPUT_TMDS);
257 278
258 if (nv_partner && ((nv_encoder->dcb->type == OUTPUT_ANALOG && 279 if (nv_partner && ((nv_encoder->dcb->type == DCB_OUTPUT_ANALOG &&
259 nv_partner->dcb->type == OUTPUT_TMDS) || 280 nv_partner->dcb->type == DCB_OUTPUT_TMDS) ||
260 (nv_encoder->dcb->type == OUTPUT_TMDS && 281 (nv_encoder->dcb->type == DCB_OUTPUT_TMDS &&
261 nv_partner->dcb->type == OUTPUT_ANALOG))) { 282 nv_partner->dcb->type == DCB_OUTPUT_ANALOG))) {
262 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) 283 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
263 type = OUTPUT_TMDS; 284 type = DCB_OUTPUT_TMDS;
264 else 285 else
265 type = OUTPUT_ANALOG; 286 type = DCB_OUTPUT_ANALOG;
266 287
267 nv_encoder = find_encoder(connector, type); 288 nv_encoder = find_encoder(connector, type);
268 } 289 }
@@ -278,9 +299,9 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
278 } 299 }
279 300
280detect_analog: 301detect_analog:
281 nv_encoder = find_encoder(connector, OUTPUT_ANALOG); 302 nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG);
282 if (!nv_encoder && !nouveau_tv_disable) 303 if (!nv_encoder && !nouveau_tv_disable)
283 nv_encoder = find_encoder(connector, OUTPUT_TV); 304 nv_encoder = find_encoder(connector, DCB_OUTPUT_TV);
284 if (nv_encoder && force) { 305 if (nv_encoder && force) {
285 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 306 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
286 struct drm_encoder_helper_funcs *helper = 307 struct drm_encoder_helper_funcs *helper =
@@ -301,7 +322,7 @@ static enum drm_connector_status
301nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) 322nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
302{ 323{
303 struct drm_device *dev = connector->dev; 324 struct drm_device *dev = connector->dev;
304 struct drm_nouveau_private *dev_priv = dev->dev_private; 325 struct nouveau_drm *drm = nouveau_drm(dev);
305 struct nouveau_connector *nv_connector = nouveau_connector(connector); 326 struct nouveau_connector *nv_connector = nouveau_connector(connector);
306 struct nouveau_encoder *nv_encoder = NULL; 327 struct nouveau_encoder *nv_encoder = NULL;
307 enum drm_connector_status status = connector_status_disconnected; 328 enum drm_connector_status status = connector_status_disconnected;
@@ -313,12 +334,12 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
313 nv_connector->edid = NULL; 334 nv_connector->edid = NULL;
314 } 335 }
315 336
316 nv_encoder = find_encoder(connector, OUTPUT_LVDS); 337 nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
317 if (!nv_encoder) 338 if (!nv_encoder)
318 return connector_status_disconnected; 339 return connector_status_disconnected;
319 340
320 /* Try retrieving EDID via DDC */ 341 /* Try retrieving EDID via DDC */
321 if (!dev_priv->vbios.fp_no_ddc) { 342 if (!drm->vbios.fp_no_ddc) {
322 status = nouveau_connector_detect(connector, force); 343 status = nouveau_connector_detect(connector, force);
323 if (status == connector_status_connected) 344 if (status == connector_status_connected)
324 goto out; 345 goto out;
@@ -334,7 +355,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
334 * valid - it's not (rh#613284) 355 * valid - it's not (rh#613284)
335 */ 356 */
336 if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) { 357 if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
337 if (!nouveau_acpi_edid(dev, connector)) { 358 if (!(nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
338 status = connector_status_connected; 359 status = connector_status_connected;
339 goto out; 360 goto out;
340 } 361 }
@@ -344,7 +365,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
344 * modeline is avalilable for the panel, set it as the panel's 365 * modeline is avalilable for the panel, set it as the panel's
345 * native mode and exit. 366 * native mode and exit.
346 */ 367 */
347 if (nouveau_bios_fp_mode(dev, NULL) && (dev_priv->vbios.fp_no_ddc || 368 if (nouveau_bios_fp_mode(dev, NULL) && (drm->vbios.fp_no_ddc ||
348 nv_encoder->dcb->lvdsconf.use_straps_for_mode)) { 369 nv_encoder->dcb->lvdsconf.use_straps_for_mode)) {
349 status = connector_status_connected; 370 status = connector_status_connected;
350 goto out; 371 goto out;
@@ -353,7 +374,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
353 /* Still nothing, some VBIOS images have a hardcoded EDID block 374 /* Still nothing, some VBIOS images have a hardcoded EDID block
354 * stored for the panel stored in them. 375 * stored for the panel stored in them.
355 */ 376 */
356 if (!dev_priv->vbios.fp_no_ddc) { 377 if (!drm->vbios.fp_no_ddc) {
357 struct edid *edid = 378 struct edid *edid =
358 (struct edid *)nouveau_bios_embedded_edid(dev); 379 (struct edid *)nouveau_bios_embedded_edid(dev);
359 if (edid) { 380 if (edid) {
@@ -379,21 +400,22 @@ out:
379static void 400static void
380nouveau_connector_force(struct drm_connector *connector) 401nouveau_connector_force(struct drm_connector *connector)
381{ 402{
403 struct nouveau_drm *drm = nouveau_drm(connector->dev);
382 struct nouveau_connector *nv_connector = nouveau_connector(connector); 404 struct nouveau_connector *nv_connector = nouveau_connector(connector);
383 struct nouveau_encoder *nv_encoder; 405 struct nouveau_encoder *nv_encoder;
384 int type; 406 int type;
385 407
386 if (nv_connector->type == DCB_CONNECTOR_DVI_I) { 408 if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
387 if (connector->force == DRM_FORCE_ON_DIGITAL) 409 if (connector->force == DRM_FORCE_ON_DIGITAL)
388 type = OUTPUT_TMDS; 410 type = DCB_OUTPUT_TMDS;
389 else 411 else
390 type = OUTPUT_ANALOG; 412 type = DCB_OUTPUT_ANALOG;
391 } else 413 } else
392 type = OUTPUT_ANY; 414 type = DCB_OUTPUT_ANY;
393 415
394 nv_encoder = find_encoder(connector, type); 416 nv_encoder = find_encoder(connector, type);
395 if (!nv_encoder) { 417 if (!nv_encoder) {
396 NV_ERROR(connector->dev, "can't find encoder to force %s on!\n", 418 NV_ERROR(drm, "can't find encoder to force %s on!\n",
397 drm_get_connector_name(connector)); 419 drm_get_connector_name(connector));
398 connector->status = connector_status_disconnected; 420 connector->status = connector_status_disconnected;
399 return; 421 return;
@@ -406,8 +428,7 @@ static int
406nouveau_connector_set_property(struct drm_connector *connector, 428nouveau_connector_set_property(struct drm_connector *connector,
407 struct drm_property *property, uint64_t value) 429 struct drm_property *property, uint64_t value)
408{ 430{
409 struct drm_nouveau_private *dev_priv = connector->dev->dev_private; 431 struct nouveau_display *disp = nouveau_display(connector->dev);
410 struct nouveau_display_engine *disp = &dev_priv->engine.display;
411 struct nouveau_connector *nv_connector = nouveau_connector(connector); 432 struct nouveau_connector *nv_connector = nouveau_connector(connector);
412 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; 433 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
413 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 434 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -532,7 +553,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
532 } 553 }
533 } 554 }
534 555
535 if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV) 556 if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
536 return get_slave_funcs(encoder)->set_property( 557 return get_slave_funcs(encoder)->set_property(
537 encoder, connector, property, value); 558 encoder, connector, property, value);
538 559
@@ -543,6 +564,7 @@ static struct drm_display_mode *
543nouveau_connector_native_mode(struct drm_connector *connector) 564nouveau_connector_native_mode(struct drm_connector *connector)
544{ 565{
545 struct drm_connector_helper_funcs *helper = connector->helper_private; 566 struct drm_connector_helper_funcs *helper = connector->helper_private;
567 struct nouveau_drm *drm = nouveau_drm(connector->dev);
546 struct nouveau_connector *nv_connector = nouveau_connector(connector); 568 struct nouveau_connector *nv_connector = nouveau_connector(connector);
547 struct drm_device *dev = connector->dev; 569 struct drm_device *dev = connector->dev;
548 struct drm_display_mode *mode, *largest = NULL; 570 struct drm_display_mode *mode, *largest = NULL;
@@ -556,7 +578,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
556 578
557 /* Use preferred mode if there is one.. */ 579 /* Use preferred mode if there is one.. */
558 if (mode->type & DRM_MODE_TYPE_PREFERRED) { 580 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
559 NV_DEBUG_KMS(dev, "native mode from preferred\n"); 581 NV_DEBUG(drm, "native mode from preferred\n");
560 return drm_mode_duplicate(dev, mode); 582 return drm_mode_duplicate(dev, mode);
561 } 583 }
562 584
@@ -579,7 +601,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
579 largest = mode; 601 largest = mode;
580 } 602 }
581 603
582 NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n", 604 NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
583 high_w, high_h, high_v); 605 high_w, high_h, high_v);
584 return largest ? drm_mode_duplicate(dev, largest) : NULL; 606 return largest ? drm_mode_duplicate(dev, largest) : NULL;
585} 607}
@@ -643,10 +665,10 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
643static void 665static void
644nouveau_connector_detect_depth(struct drm_connector *connector) 666nouveau_connector_detect_depth(struct drm_connector *connector)
645{ 667{
646 struct drm_nouveau_private *dev_priv = connector->dev->dev_private; 668 struct nouveau_drm *drm = nouveau_drm(connector->dev);
647 struct nouveau_connector *nv_connector = nouveau_connector(connector); 669 struct nouveau_connector *nv_connector = nouveau_connector(connector);
648 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; 670 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
649 struct nvbios *bios = &dev_priv->vbios; 671 struct nvbios *bios = &drm->vbios;
650 struct drm_display_mode *mode = nv_connector->native_mode; 672 struct drm_display_mode *mode = nv_connector->native_mode;
651 bool duallink; 673 bool duallink;
652 674
@@ -661,7 +683,7 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
661 } 683 }
662 684
663 /* we're out of options unless we're LVDS, default to 8bpc */ 685 /* we're out of options unless we're LVDS, default to 8bpc */
664 if (nv_encoder->dcb->type != OUTPUT_LVDS) { 686 if (nv_encoder->dcb->type != DCB_OUTPUT_LVDS) {
665 connector->display_info.bpc = 8; 687 connector->display_info.bpc = 8;
666 return; 688 return;
667 } 689 }
@@ -693,7 +715,7 @@ static int
693nouveau_connector_get_modes(struct drm_connector *connector) 715nouveau_connector_get_modes(struct drm_connector *connector)
694{ 716{
695 struct drm_device *dev = connector->dev; 717 struct drm_device *dev = connector->dev;
696 struct drm_nouveau_private *dev_priv = dev->dev_private; 718 struct nouveau_drm *drm = nouveau_drm(dev);
697 struct nouveau_connector *nv_connector = nouveau_connector(connector); 719 struct nouveau_connector *nv_connector = nouveau_connector(connector);
698 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; 720 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
699 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 721 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -709,9 +731,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
709 if (nv_connector->edid) 731 if (nv_connector->edid)
710 ret = drm_add_edid_modes(connector, nv_connector->edid); 732 ret = drm_add_edid_modes(connector, nv_connector->edid);
711 else 733 else
712 if (nv_encoder->dcb->type == OUTPUT_LVDS && 734 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
713 (nv_encoder->dcb->lvdsconf.use_straps_for_mode || 735 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
714 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { 736 drm->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
715 struct drm_display_mode mode; 737 struct drm_display_mode mode;
716 738
717 nouveau_bios_fp_mode(dev, &mode); 739 nouveau_bios_fp_mode(dev, &mode);
@@ -746,7 +768,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
746 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 768 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
747 nouveau_connector_detect_depth(connector); 769 nouveau_connector_detect_depth(connector);
748 770
749 if (nv_encoder->dcb->type == OUTPUT_TV) 771 if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
750 ret = get_slave_funcs(encoder)->get_modes(encoder, connector); 772 ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
751 773
752 if (nv_connector->type == DCB_CONNECTOR_LVDS || 774 if (nv_connector->type == DCB_CONNECTOR_LVDS ||
@@ -761,15 +783,15 @@ static unsigned
761get_tmds_link_bandwidth(struct drm_connector *connector) 783get_tmds_link_bandwidth(struct drm_connector *connector)
762{ 784{
763 struct nouveau_connector *nv_connector = nouveau_connector(connector); 785 struct nouveau_connector *nv_connector = nouveau_connector(connector);
764 struct drm_nouveau_private *dev_priv = connector->dev->dev_private; 786 struct nouveau_drm *drm = nouveau_drm(connector->dev);
765 struct dcb_entry *dcb = nv_connector->detected_encoder->dcb; 787 struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
766 788
767 if (dcb->location != DCB_LOC_ON_CHIP || 789 if (dcb->location != DCB_LOC_ON_CHIP ||
768 dev_priv->chipset >= 0x46) 790 nv_device(drm->device)->chipset >= 0x46)
769 return 165000; 791 return 165000;
770 else if (dev_priv->chipset >= 0x40) 792 else if (nv_device(drm->device)->chipset >= 0x40)
771 return 155000; 793 return 155000;
772 else if (dev_priv->chipset >= 0x18) 794 else if (nv_device(drm->device)->chipset >= 0x18)
773 return 135000; 795 return 135000;
774 else 796 else
775 return 112000; 797 return 112000;
@@ -786,7 +808,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
786 unsigned clock = mode->clock; 808 unsigned clock = mode->clock;
787 809
788 switch (nv_encoder->dcb->type) { 810 switch (nv_encoder->dcb->type) {
789 case OUTPUT_LVDS: 811 case DCB_OUTPUT_LVDS:
790 if (nv_connector->native_mode && 812 if (nv_connector->native_mode &&
791 (mode->hdisplay > nv_connector->native_mode->hdisplay || 813 (mode->hdisplay > nv_connector->native_mode->hdisplay ||
792 mode->vdisplay > nv_connector->native_mode->vdisplay)) 814 mode->vdisplay > nv_connector->native_mode->vdisplay))
@@ -795,19 +817,19 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
795 min_clock = 0; 817 min_clock = 0;
796 max_clock = 400000; 818 max_clock = 400000;
797 break; 819 break;
798 case OUTPUT_TMDS: 820 case DCB_OUTPUT_TMDS:
799 max_clock = get_tmds_link_bandwidth(connector); 821 max_clock = get_tmds_link_bandwidth(connector);
800 if (nouveau_duallink && nv_encoder->dcb->duallink_possible) 822 if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
801 max_clock *= 2; 823 max_clock *= 2;
802 break; 824 break;
803 case OUTPUT_ANALOG: 825 case DCB_OUTPUT_ANALOG:
804 max_clock = nv_encoder->dcb->crtconf.maxfreq; 826 max_clock = nv_encoder->dcb->crtconf.maxfreq;
805 if (!max_clock) 827 if (!max_clock)
806 max_clock = 350000; 828 max_clock = 350000;
807 break; 829 break;
808 case OUTPUT_TV: 830 case DCB_OUTPUT_TV:
809 return get_slave_funcs(encoder)->mode_valid(encoder, mode); 831 return get_slave_funcs(encoder)->mode_valid(encoder, mode);
810 case OUTPUT_DP: 832 case DCB_OUTPUT_DP:
811 max_clock = nv_encoder->dp.link_nr; 833 max_clock = nv_encoder->dp.link_nr;
812 max_clock *= nv_encoder->dp.link_bw; 834 max_clock *= nv_encoder->dp.link_bw;
813 clock = clock * (connector->display_info.bpc * 3) / 10; 835 clock = clock * (connector->display_info.bpc * 3) / 10;
@@ -899,14 +921,15 @@ struct drm_connector *
899nouveau_connector_create(struct drm_device *dev, int index) 921nouveau_connector_create(struct drm_device *dev, int index)
900{ 922{
901 const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; 923 const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
902 struct drm_nouveau_private *dev_priv = dev->dev_private; 924 struct nouveau_drm *drm = nouveau_drm(dev);
903 struct nouveau_display_engine *disp = &dev_priv->engine.display; 925 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
926 struct nouveau_display *disp = nouveau_display(dev);
904 struct nouveau_connector *nv_connector = NULL; 927 struct nouveau_connector *nv_connector = NULL;
905 struct drm_connector *connector; 928 struct drm_connector *connector;
906 int type, ret = 0; 929 int type, ret = 0;
907 bool dummy; 930 bool dummy;
908 931
909 NV_DEBUG_KMS(dev, "\n"); 932 NV_DEBUG(drm, "\n");
910 933
911 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 934 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
912 nv_connector = nouveau_connector(connector); 935 nv_connector = nouveau_connector(connector);
@@ -922,7 +945,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
922 nv_connector->index = index; 945 nv_connector->index = index;
923 946
924 /* attempt to parse vbios connector type and hotplug gpio */ 947 /* attempt to parse vbios connector type and hotplug gpio */
925 nv_connector->dcb = dcb_conn(dev, index); 948 nv_connector->dcb = olddcb_conn(dev, index);
926 if (nv_connector->dcb) { 949 if (nv_connector->dcb) {
927 static const u8 hpd[16] = { 950 static const u8 hpd[16] = {
928 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff, 951 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
@@ -930,7 +953,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
930 }; 953 };
931 954
932 u32 entry = ROM16(nv_connector->dcb[0]); 955 u32 entry = ROM16(nv_connector->dcb[0]);
933 if (dcb_conntab(dev)[3] >= 4) 956 if (olddcb_conntab(dev)[3] >= 4)
934 entry |= (u32)ROM16(nv_connector->dcb[2]) << 16; 957 entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
935 958
936 nv_connector->hpd = ffs((entry & 0x07033000) >> 12); 959 nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
@@ -939,7 +962,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
939 nv_connector->type = nv_connector->dcb[0]; 962 nv_connector->type = nv_connector->dcb[0];
940 if (drm_conntype_from_dcb(nv_connector->type) == 963 if (drm_conntype_from_dcb(nv_connector->type) ==
941 DRM_MODE_CONNECTOR_Unknown) { 964 DRM_MODE_CONNECTOR_Unknown) {
942 NV_WARN(dev, "unknown connector type %02x\n", 965 NV_WARN(drm, "unknown connector type %02x\n",
943 nv_connector->type); 966 nv_connector->type);
944 nv_connector->type = DCB_CONNECTOR_NONE; 967 nv_connector->type = DCB_CONNECTOR_NONE;
945 } 968 }
@@ -964,8 +987,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
964 * figure out something suitable ourselves 987 * figure out something suitable ourselves
965 */ 988 */
966 if (nv_connector->type == DCB_CONNECTOR_NONE) { 989 if (nv_connector->type == DCB_CONNECTOR_NONE) {
967 struct drm_nouveau_private *dev_priv = dev->dev_private; 990 struct nouveau_drm *drm = nouveau_drm(dev);
968 struct dcb_table *dcbt = &dev_priv->vbios.dcb; 991 struct dcb_table *dcbt = &drm->vbios.dcb;
969 u32 encoders = 0; 992 u32 encoders = 0;
970 int i; 993 int i;
971 994
@@ -974,25 +997,25 @@ nouveau_connector_create(struct drm_device *dev, int index)
974 encoders |= (1 << dcbt->entry[i].type); 997 encoders |= (1 << dcbt->entry[i].type);
975 } 998 }
976 999
977 if (encoders & (1 << OUTPUT_DP)) { 1000 if (encoders & (1 << DCB_OUTPUT_DP)) {
978 if (encoders & (1 << OUTPUT_TMDS)) 1001 if (encoders & (1 << DCB_OUTPUT_TMDS))
979 nv_connector->type = DCB_CONNECTOR_DP; 1002 nv_connector->type = DCB_CONNECTOR_DP;
980 else 1003 else
981 nv_connector->type = DCB_CONNECTOR_eDP; 1004 nv_connector->type = DCB_CONNECTOR_eDP;
982 } else 1005 } else
983 if (encoders & (1 << OUTPUT_TMDS)) { 1006 if (encoders & (1 << DCB_OUTPUT_TMDS)) {
984 if (encoders & (1 << OUTPUT_ANALOG)) 1007 if (encoders & (1 << DCB_OUTPUT_ANALOG))
985 nv_connector->type = DCB_CONNECTOR_DVI_I; 1008 nv_connector->type = DCB_CONNECTOR_DVI_I;
986 else 1009 else
987 nv_connector->type = DCB_CONNECTOR_DVI_D; 1010 nv_connector->type = DCB_CONNECTOR_DVI_D;
988 } else 1011 } else
989 if (encoders & (1 << OUTPUT_ANALOG)) { 1012 if (encoders & (1 << DCB_OUTPUT_ANALOG)) {
990 nv_connector->type = DCB_CONNECTOR_VGA; 1013 nv_connector->type = DCB_CONNECTOR_VGA;
991 } else 1014 } else
992 if (encoders & (1 << OUTPUT_LVDS)) { 1015 if (encoders & (1 << DCB_OUTPUT_LVDS)) {
993 nv_connector->type = DCB_CONNECTOR_LVDS; 1016 nv_connector->type = DCB_CONNECTOR_LVDS;
994 } else 1017 } else
995 if (encoders & (1 << OUTPUT_TV)) { 1018 if (encoders & (1 << DCB_OUTPUT_TV)) {
996 nv_connector->type = DCB_CONNECTOR_TV_0; 1019 nv_connector->type = DCB_CONNECTOR_TV_0;
997 } 1020 }
998 } 1021 }
@@ -1001,7 +1024,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
1001 if (type == DRM_MODE_CONNECTOR_LVDS) { 1024 if (type == DRM_MODE_CONNECTOR_LVDS) {
1002 ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy); 1025 ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
1003 if (ret) { 1026 if (ret) {
1004 NV_ERROR(dev, "Error parsing LVDS table, disabling\n"); 1027 NV_ERROR(drm, "Error parsing LVDS table, disabling\n");
1005 kfree(nv_connector); 1028 kfree(nv_connector);
1006 return ERR_PTR(ret); 1029 return ERR_PTR(ret);
1007 } 1030 }
@@ -1051,7 +1074,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
1051 1074
1052 switch (nv_connector->type) { 1075 switch (nv_connector->type) {
1053 case DCB_CONNECTOR_VGA: 1076 case DCB_CONNECTOR_VGA:
1054 if (dev_priv->card_type >= NV_50) { 1077 if (nv_device(drm->device)->card_type >= NV_50) {
1055 drm_connector_attach_property(connector, 1078 drm_connector_attach_property(connector,
1056 dev->mode_config.scaling_mode_property, 1079 dev->mode_config.scaling_mode_property,
1057 nv_connector->scaling_mode); 1080 nv_connector->scaling_mode);
@@ -1084,10 +1107,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
1084 } 1107 }
1085 1108
1086 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1109 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1087 if (nv_connector->hpd != DCB_GPIO_UNUSED) { 1110 if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
1088 ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff, 1111 ret = gpio->isr_add(gpio, 0, nv_connector->hpd, 0xff,
1089 nouveau_connector_hotplug, 1112 nouveau_connector_hotplug, connector);
1090 connector);
1091 if (ret == 0) 1113 if (ret == 0)
1092 connector->polled = DRM_CONNECTOR_POLL_HPD; 1114 connector->polled = DRM_CONNECTOR_POLL_HPD;
1093 } 1115 }
@@ -1101,8 +1123,9 @@ nouveau_connector_hotplug(void *data, int plugged)
1101{ 1123{
1102 struct drm_connector *connector = data; 1124 struct drm_connector *connector = data;
1103 struct drm_device *dev = connector->dev; 1125 struct drm_device *dev = connector->dev;
1126 struct nouveau_drm *drm = nouveau_drm(dev);
1104 1127
1105 NV_DEBUG(dev, "%splugged %s\n", plugged ? "" : "un", 1128 NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
1106 drm_get_connector_name(connector)); 1129 drm_get_connector_name(connector));
1107 1130
1108 if (plugged) 1131 if (plugged)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index e4857021304c..9503cfa0492c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,7 +28,8 @@
28#define __NOUVEAU_CONNECTOR_H__ 28#define __NOUVEAU_CONNECTOR_H__
29 29
30#include "drm_edid.h" 30#include "drm_edid.h"
31#include "nouveau_i2c.h" 31
32struct nouveau_i2c_port;
32 33
33enum nouveau_underscan_type { 34enum nouveau_underscan_type {
34 UNDERSCAN_OFF, 35 UNDERSCAN_OFF,
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
deleted file mode 100644
index 188c92b327e2..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/*
2 * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Ben Skeggs <bskeggs@redhat.com>
29 */
30
31#include <linux/debugfs.h>
32
33#include "drmP.h"
34#include "nouveau_drv.h"
35
36#include <ttm/ttm_page_alloc.h>
37
38static int
39nouveau_debugfs_channel_info(struct seq_file *m, void *data)
40{
41 struct drm_info_node *node = (struct drm_info_node *) m->private;
42 struct nouveau_channel *chan = node->info_ent->data;
43
44 seq_printf(m, "channel id : %d\n", chan->id);
45
46 seq_printf(m, "cpu fifo state:\n");
47 seq_printf(m, " base: 0x%10llx\n", chan->pushbuf_base);
48 seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
49 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
50 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
51 seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
52 if (chan->dma.ib_max) {
53 seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
54 seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
55 seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
56 }
57
58 seq_printf(m, "gpu fifo state:\n");
59 seq_printf(m, " get: 0x%08x\n",
60 nvchan_rd32(chan, chan->user_get));
61 seq_printf(m, " put: 0x%08x\n",
62 nvchan_rd32(chan, chan->user_put));
63 if (chan->dma.ib_max) {
64 seq_printf(m, " ib get: 0x%08x\n",
65 nvchan_rd32(chan, 0x88));
66 seq_printf(m, " ib put: 0x%08x\n",
67 nvchan_rd32(chan, 0x8c));
68 }
69
70 return 0;
71}
72
73int
74nouveau_debugfs_channel_init(struct nouveau_channel *chan)
75{
76 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
77 struct drm_minor *minor = chan->dev->primary;
78 int ret;
79
80 if (!dev_priv->debugfs.channel_root) {
81 dev_priv->debugfs.channel_root =
82 debugfs_create_dir("channel", minor->debugfs_root);
83 if (!dev_priv->debugfs.channel_root)
84 return -ENOENT;
85 }
86
87 snprintf(chan->debugfs.name, 32, "%d", chan->id);
88 chan->debugfs.info.name = chan->debugfs.name;
89 chan->debugfs.info.show = nouveau_debugfs_channel_info;
90 chan->debugfs.info.driver_features = 0;
91 chan->debugfs.info.data = chan;
92
93 ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
94 dev_priv->debugfs.channel_root,
95 chan->dev->primary);
96 if (ret == 0)
97 chan->debugfs.active = true;
98 return ret;
99}
100
101void
102nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
103{
104 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
105
106 if (!chan->debugfs.active)
107 return;
108
109 drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
110 chan->debugfs.active = false;
111
112 if (chan == dev_priv->channel) {
113 debugfs_remove(dev_priv->debugfs.channel_root);
114 dev_priv->debugfs.channel_root = NULL;
115 }
116}
117
118static int
119nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
120{
121 struct drm_info_node *node = (struct drm_info_node *) m->private;
122 struct drm_minor *minor = node->minor;
123 struct drm_device *dev = minor->dev;
124 struct drm_nouveau_private *dev_priv = dev->dev_private;
125 uint32_t ppci_0;
126
127 ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
128
129 seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
130 seq_printf(m, "PCI ID : 0x%04x:0x%04x\n",
131 ppci_0 & 0xffff, ppci_0 >> 16);
132 return 0;
133}
134
135static int
136nouveau_debugfs_memory_info(struct seq_file *m, void *data)
137{
138 struct drm_info_node *node = (struct drm_info_node *) m->private;
139 struct drm_minor *minor = node->minor;
140 struct drm_nouveau_private *dev_priv = minor->dev->dev_private;
141
142 seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10));
143 return 0;
144}
145
146static int
147nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
148{
149 struct drm_info_node *node = (struct drm_info_node *) m->private;
150 struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
151 int i;
152
153 for (i = 0; i < dev_priv->vbios.length; i++)
154 seq_printf(m, "%c", dev_priv->vbios.data[i]);
155 return 0;
156}
157
158static int
159nouveau_debugfs_evict_vram(struct seq_file *m, void *data)
160{
161 struct drm_info_node *node = (struct drm_info_node *) m->private;
162 struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
163 int ret;
164
165 ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
166 if (ret)
167 seq_printf(m, "failed: %d", ret);
168 else
169 seq_printf(m, "succeeded\n");
170 return 0;
171}
172
173static struct drm_info_list nouveau_debugfs_list[] = {
174 { "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
175 { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
176 { "memory", nouveau_debugfs_memory_info, 0, NULL },
177 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
178 { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
179 { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
180};
181#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
182
183int
184nouveau_debugfs_init(struct drm_minor *minor)
185{
186 drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
187 minor->debugfs_root, minor);
188 return 0;
189}
190
191void
192nouveau_debugfs_takedown(struct drm_minor *minor)
193{
194 drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
195 minor);
196}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7e16dc5e6467..61f370d000e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -26,18 +26,21 @@
26 26
27#include "drmP.h" 27#include "drmP.h"
28#include "drm_crtc_helper.h" 28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h" 29
30#include "nouveau_fb.h"
31#include "nouveau_fbcon.h" 30#include "nouveau_fbcon.h"
32#include "nouveau_hw.h" 31#include "nouveau_hw.h"
33#include "nouveau_crtc.h" 32#include "nouveau_crtc.h"
34#include "nouveau_dma.h" 33#include "nouveau_dma.h"
34#include "nouveau_gem.h"
35#include "nouveau_connector.h" 35#include "nouveau_connector.h"
36#include "nouveau_software.h"
37#include "nouveau_gpio.h"
38#include "nouveau_fence.h"
39#include "nv50_display.h" 36#include "nv50_display.h"
40 37
38#include "nouveau_fence.h"
39
40#include <subdev/bios/gpio.h>
41#include <subdev/gpio.h>
42#include <engine/disp.h>
43
41static void 44static void
42nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) 45nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
43{ 46{
@@ -71,7 +74,7 @@ nouveau_framebuffer_init(struct drm_device *dev,
71 struct drm_mode_fb_cmd2 *mode_cmd, 74 struct drm_mode_fb_cmd2 *mode_cmd,
72 struct nouveau_bo *nvbo) 75 struct nouveau_bo *nvbo)
73{ 76{
74 struct drm_nouveau_private *dev_priv = dev->dev_private; 77 struct nouveau_drm *drm = nouveau_drm(dev);
75 struct drm_framebuffer *fb = &nv_fb->base; 78 struct drm_framebuffer *fb = &nv_fb->base;
76 int ret; 79 int ret;
77 80
@@ -83,7 +86,7 @@ nouveau_framebuffer_init(struct drm_device *dev,
83 drm_helper_mode_fill_fb_struct(fb, mode_cmd); 86 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
84 nv_fb->nvbo = nvbo; 87 nv_fb->nvbo = nvbo;
85 88
86 if (dev_priv->card_type >= NV_50) { 89 if (nv_device(drm->device)->card_type >= NV_50) {
87 u32 tile_flags = nouveau_bo_tile_layout(nvbo); 90 u32 tile_flags = nouveau_bo_tile_layout(nvbo);
88 if (tile_flags == 0x7a00 || 91 if (tile_flags == 0x7a00 ||
89 tile_flags == 0xfe00) 92 tile_flags == 0xfe00)
@@ -102,21 +105,21 @@ nouveau_framebuffer_init(struct drm_device *dev,
102 case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break; 105 case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
103 case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break; 106 case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
104 default: 107 default:
105 NV_ERROR(dev, "unknown depth %d\n", fb->depth); 108 NV_ERROR(drm, "unknown depth %d\n", fb->depth);
106 return -EINVAL; 109 return -EINVAL;
107 } 110 }
108 111
109 if (dev_priv->chipset == 0x50) 112 if (nv_device(drm->device)->chipset == 0x50)
110 nv_fb->r_format |= (tile_flags << 8); 113 nv_fb->r_format |= (tile_flags << 8);
111 114
112 if (!tile_flags) { 115 if (!tile_flags) {
113 if (dev_priv->card_type < NV_D0) 116 if (nv_device(drm->device)->card_type < NV_D0)
114 nv_fb->r_pitch = 0x00100000 | fb->pitches[0]; 117 nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
115 else 118 else
116 nv_fb->r_pitch = 0x01000000 | fb->pitches[0]; 119 nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
117 } else { 120 } else {
118 u32 mode = nvbo->tile_mode; 121 u32 mode = nvbo->tile_mode;
119 if (dev_priv->card_type >= NV_C0) 122 if (nv_device(drm->device)->card_type >= NV_C0)
120 mode >>= 4; 123 mode >>= 4;
121 nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode; 124 nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
122 } 125 }
@@ -212,8 +215,9 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
212int 215int
213nouveau_display_init(struct drm_device *dev) 216nouveau_display_init(struct drm_device *dev)
214{ 217{
215 struct drm_nouveau_private *dev_priv = dev->dev_private; 218 struct nouveau_drm *drm = nouveau_drm(dev);
216 struct nouveau_display_engine *disp = &dev_priv->engine.display; 219 struct nouveau_display *disp = nouveau_display(dev);
220 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
217 struct drm_connector *connector; 221 struct drm_connector *connector;
218 int ret; 222 int ret;
219 223
@@ -225,8 +229,8 @@ nouveau_display_init(struct drm_device *dev)
225 * some vbios default this to off for some reason, causing the 229 * some vbios default this to off for some reason, causing the
226 * panel to not work after resume 230 * panel to not work after resume
227 */ 231 */
228 if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) { 232 if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
229 nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true); 233 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
230 msleep(300); 234 msleep(300);
231 } 235 }
232 236
@@ -236,7 +240,8 @@ nouveau_display_init(struct drm_device *dev)
236 /* enable hotplug interrupts */ 240 /* enable hotplug interrupts */
237 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 241 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
238 struct nouveau_connector *conn = nouveau_connector(connector); 242 struct nouveau_connector *conn = nouveau_connector(connector);
239 nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true); 243 if (gpio)
244 gpio->irq(gpio, 0, conn->hpd, 0xff, true);
240 } 245 }
241 246
242 return ret; 247 return ret;
@@ -245,35 +250,65 @@ nouveau_display_init(struct drm_device *dev)
245void 250void
246nouveau_display_fini(struct drm_device *dev) 251nouveau_display_fini(struct drm_device *dev)
247{ 252{
248 struct drm_nouveau_private *dev_priv = dev->dev_private; 253 struct nouveau_drm *drm = nouveau_drm(dev);
249 struct nouveau_display_engine *disp = &dev_priv->engine.display; 254 struct nouveau_display *disp = nouveau_display(dev);
255 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
250 struct drm_connector *connector; 256 struct drm_connector *connector;
251 257
252 /* disable hotplug interrupts */ 258 /* disable hotplug interrupts */
253 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 259 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
254 struct nouveau_connector *conn = nouveau_connector(connector); 260 struct nouveau_connector *conn = nouveau_connector(connector);
255 nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false); 261 if (gpio)
262 gpio->irq(gpio, 0, conn->hpd, 0xff, false);
256 } 263 }
257 264
258 drm_kms_helper_poll_disable(dev); 265 drm_kms_helper_poll_disable(dev);
259 disp->fini(dev); 266 disp->fini(dev);
260} 267}
261 268
269static void
270nouveau_display_vblank_notify(void *data, int crtc)
271{
272 drm_handle_vblank(data, crtc);
273}
274
275static void
276nouveau_display_vblank_get(void *data, int crtc)
277{
278 drm_vblank_get(data, crtc);
279}
280
281static void
282nouveau_display_vblank_put(void *data, int crtc)
283{
284 drm_vblank_put(data, crtc);
285}
286
262int 287int
263nouveau_display_create(struct drm_device *dev) 288nouveau_display_create(struct drm_device *dev)
264{ 289{
265 struct drm_nouveau_private *dev_priv = dev->dev_private; 290 struct nouveau_drm *drm = nouveau_drm(dev);
266 struct nouveau_display_engine *disp = &dev_priv->engine.display; 291 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
292 struct nouveau_display *disp;
267 int ret, gen; 293 int ret, gen;
268 294
295 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
296 if (!disp)
297 return -ENOMEM;
298
299 pdisp->vblank.data = dev;
300 pdisp->vblank.notify = nouveau_display_vblank_notify;
301 pdisp->vblank.get = nouveau_display_vblank_get;
302 pdisp->vblank.put = nouveau_display_vblank_put;
303
269 drm_mode_config_init(dev); 304 drm_mode_config_init(dev);
270 drm_mode_create_scaling_mode_property(dev); 305 drm_mode_create_scaling_mode_property(dev);
271 drm_mode_create_dvi_i_properties(dev); 306 drm_mode_create_dvi_i_properties(dev);
272 307
273 if (dev_priv->card_type < NV_50) 308 if (nv_device(drm->device)->card_type < NV_50)
274 gen = 0; 309 gen = 0;
275 else 310 else
276 if (dev_priv->card_type < NV_D0) 311 if (nv_device(drm->device)->card_type < NV_D0)
277 gen = 1; 312 gen = 1;
278 else 313 else
279 gen = 2; 314 gen = 2;
@@ -307,11 +342,11 @@ nouveau_display_create(struct drm_device *dev)
307 342
308 dev->mode_config.min_width = 0; 343 dev->mode_config.min_width = 0;
309 dev->mode_config.min_height = 0; 344 dev->mode_config.min_height = 0;
310 if (dev_priv->card_type < NV_10) { 345 if (nv_device(drm->device)->card_type < NV_10) {
311 dev->mode_config.max_width = 2048; 346 dev->mode_config.max_width = 2048;
312 dev->mode_config.max_height = 2048; 347 dev->mode_config.max_height = 2048;
313 } else 348 } else
314 if (dev_priv->card_type < NV_50) { 349 if (nv_device(drm->device)->card_type < NV_50) {
315 dev->mode_config.max_width = 4096; 350 dev->mode_config.max_width = 4096;
316 dev->mode_config.max_height = 4096; 351 dev->mode_config.max_height = 4096;
317 } else { 352 } else {
@@ -325,7 +360,13 @@ nouveau_display_create(struct drm_device *dev)
325 drm_kms_helper_poll_init(dev); 360 drm_kms_helper_poll_init(dev);
326 drm_kms_helper_poll_disable(dev); 361 drm_kms_helper_poll_disable(dev);
327 362
328 ret = disp->create(dev); 363 if (nv_device(drm->device)->card_type < NV_50)
364 ret = nv04_display_create(dev);
365 else
366 if (nv_device(drm->device)->card_type < NV_D0)
367 ret = nv50_display_create(dev);
368 else
369 ret = nvd0_display_create(dev);
329 if (ret) 370 if (ret)
330 goto disp_create_err; 371 goto disp_create_err;
331 372
@@ -335,10 +376,11 @@ nouveau_display_create(struct drm_device *dev)
335 goto vblank_err; 376 goto vblank_err;
336 } 377 }
337 378
379 nouveau_backlight_init(dev);
338 return 0; 380 return 0;
339 381
340vblank_err: 382vblank_err:
341 disp->destroy(dev); 383 disp->dtor(dev);
342disp_create_err: 384disp_create_err:
343 drm_kms_helper_poll_fini(dev); 385 drm_kms_helper_poll_fini(dev);
344 drm_mode_config_cleanup(dev); 386 drm_mode_config_cleanup(dev);
@@ -348,24 +390,109 @@ disp_create_err:
348void 390void
349nouveau_display_destroy(struct drm_device *dev) 391nouveau_display_destroy(struct drm_device *dev)
350{ 392{
351 struct drm_nouveau_private *dev_priv = dev->dev_private; 393 struct nouveau_display *disp = nouveau_display(dev);
352 struct nouveau_display_engine *disp = &dev_priv->engine.display;
353 394
395 nouveau_backlight_exit(dev);
354 drm_vblank_cleanup(dev); 396 drm_vblank_cleanup(dev);
355 397
356 disp->destroy(dev); 398 disp->dtor(dev);
357 399
358 drm_kms_helper_poll_fini(dev); 400 drm_kms_helper_poll_fini(dev);
359 drm_mode_config_cleanup(dev); 401 drm_mode_config_cleanup(dev);
402 nouveau_drm(dev)->display = NULL;
403 kfree(disp);
404}
405
406int
407nouveau_display_suspend(struct drm_device *dev)
408{
409 struct nouveau_drm *drm = nouveau_drm(dev);
410 struct drm_crtc *crtc;
411
412 nouveau_display_fini(dev);
413
414 NV_INFO(drm, "unpinning framebuffer(s)...\n");
415 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
416 struct nouveau_framebuffer *nouveau_fb;
417
418 nouveau_fb = nouveau_framebuffer(crtc->fb);
419 if (!nouveau_fb || !nouveau_fb->nvbo)
420 continue;
421
422 nouveau_bo_unpin(nouveau_fb->nvbo);
423 }
424
425 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
426 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
427
428 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
429 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
430 }
431
432 return 0;
433}
434
435void
436nouveau_display_resume(struct drm_device *dev)
437{
438 struct nouveau_drm *drm = nouveau_drm(dev);
439 struct drm_crtc *crtc;
440 int ret;
441
442 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
443 struct nouveau_framebuffer *nouveau_fb;
444
445 nouveau_fb = nouveau_framebuffer(crtc->fb);
446 if (!nouveau_fb || !nouveau_fb->nvbo)
447 continue;
448
449 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
450 }
451
452 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
453 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
454
455 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
456 if (!ret)
457 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
458 if (ret)
459 NV_ERROR(drm, "Could not pin/map cursor.\n");
460 }
461
462 nouveau_fbcon_set_suspend(dev, 0);
463 nouveau_fbcon_zfill_all(dev);
464
465 nouveau_display_init(dev);
466
467 /* Force CLUT to get re-loaded during modeset */
468 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
469 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
470
471 nv_crtc->lut.depth = 0;
472 }
473
474 drm_helper_resume_force_mode(dev);
475
476 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
477 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
478 u32 offset = nv_crtc->cursor.nvbo->bo.offset;
479
480 nv_crtc->cursor.set_offset(nv_crtc, offset);
481 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
482 nv_crtc->cursor_saved_y);
483 }
360} 484}
361 485
362int 486int
363nouveau_vblank_enable(struct drm_device *dev, int crtc) 487nouveau_vblank_enable(struct drm_device *dev, int crtc)
364{ 488{
365 struct drm_nouveau_private *dev_priv = dev->dev_private; 489 struct nouveau_device *device = nouveau_dev(dev);
366 490
367 if (dev_priv->card_type >= NV_50) 491 if (device->card_type >= NV_D0)
368 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0, 492 nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 1);
493 else
494 if (device->card_type >= NV_50)
495 nv_mask(device, NV50_PDISPLAY_INTR_EN_1, 0,
369 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc)); 496 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
370 else 497 else
371 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 498 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
@@ -377,10 +504,13 @@ nouveau_vblank_enable(struct drm_device *dev, int crtc)
377void 504void
378nouveau_vblank_disable(struct drm_device *dev, int crtc) 505nouveau_vblank_disable(struct drm_device *dev, int crtc)
379{ 506{
380 struct drm_nouveau_private *dev_priv = dev->dev_private; 507 struct nouveau_device *device = nouveau_dev(dev);
381 508
382 if (dev_priv->card_type >= NV_50) 509 if (device->card_type >= NV_D0)
383 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 510 nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 0);
511 else
512 if (device->card_type >= NV_50)
513 nv_mask(device, NV50_PDISPLAY_INTR_EN_1,
384 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0); 514 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
385 else 515 else
386 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0); 516 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
@@ -434,15 +564,15 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
434 struct nouveau_page_flip_state *s, 564 struct nouveau_page_flip_state *s,
435 struct nouveau_fence **pfence) 565 struct nouveau_fence **pfence)
436{ 566{
437 struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW]; 567 struct nouveau_fence_chan *fctx = chan->fence;
438 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 568 struct nouveau_drm *drm = chan->drm;
439 struct drm_device *dev = chan->dev; 569 struct drm_device *dev = drm->dev;
440 unsigned long flags; 570 unsigned long flags;
441 int ret; 571 int ret;
442 572
443 /* Queue it to the pending list */ 573 /* Queue it to the pending list */
444 spin_lock_irqsave(&dev->event_lock, flags); 574 spin_lock_irqsave(&dev->event_lock, flags);
445 list_add_tail(&s->head, &swch->flip); 575 list_add_tail(&s->head, &fctx->flip);
446 spin_unlock_irqrestore(&dev->event_lock, flags); 576 spin_unlock_irqrestore(&dev->event_lock, flags);
447 577
448 /* Synchronize with the old framebuffer */ 578 /* Synchronize with the old framebuffer */
@@ -455,7 +585,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
455 if (ret) 585 if (ret)
456 goto fail; 586 goto fail;
457 587
458 if (dev_priv->card_type < NV_C0) { 588 if (nv_device(drm->device)->card_type < NV_C0) {
459 BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); 589 BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
460 OUT_RING (chan, 0x00000000); 590 OUT_RING (chan, 0x00000000);
461 OUT_RING (chan, 0x00000000); 591 OUT_RING (chan, 0x00000000);
@@ -483,7 +613,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
483 struct drm_pending_vblank_event *event) 613 struct drm_pending_vblank_event *event)
484{ 614{
485 struct drm_device *dev = crtc->dev; 615 struct drm_device *dev = crtc->dev;
486 struct drm_nouveau_private *dev_priv = dev->dev_private; 616 struct nouveau_drm *drm = nouveau_drm(dev);
487 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo; 617 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
488 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; 618 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
489 struct nouveau_page_flip_state *s; 619 struct nouveau_page_flip_state *s;
@@ -491,7 +621,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
491 struct nouveau_fence *fence; 621 struct nouveau_fence *fence;
492 int ret; 622 int ret;
493 623
494 if (!dev_priv->channel) 624 if (!drm->channel)
495 return -ENODEV; 625 return -ENODEV;
496 626
497 s = kzalloc(sizeof(*s), GFP_KERNEL); 627 s = kzalloc(sizeof(*s), GFP_KERNEL);
@@ -512,25 +642,25 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
512 /* Choose the channel the flip will be handled in */ 642 /* Choose the channel the flip will be handled in */
513 fence = new_bo->bo.sync_obj; 643 fence = new_bo->bo.sync_obj;
514 if (fence) 644 if (fence)
515 chan = nouveau_channel_get_unlocked(fence->channel); 645 chan = fence->channel;
516 if (!chan) 646 if (!chan)
517 chan = nouveau_channel_get_unlocked(dev_priv->channel); 647 chan = drm->channel;
518 mutex_lock(&chan->mutex); 648 mutex_lock(&chan->cli->mutex);
519 649
520 /* Emit a page flip */ 650 /* Emit a page flip */
521 if (dev_priv->card_type >= NV_50) { 651 if (nv_device(drm->device)->card_type >= NV_50) {
522 if (dev_priv->card_type >= NV_D0) 652 if (nv_device(drm->device)->card_type >= NV_D0)
523 ret = nvd0_display_flip_next(crtc, fb, chan, 0); 653 ret = nvd0_display_flip_next(crtc, fb, chan, 0);
524 else 654 else
525 ret = nv50_display_flip_next(crtc, fb, chan); 655 ret = nv50_display_flip_next(crtc, fb, chan);
526 if (ret) { 656 if (ret) {
527 nouveau_channel_put(&chan); 657 mutex_unlock(&chan->cli->mutex);
528 goto fail_unreserve; 658 goto fail_unreserve;
529 } 659 }
530 } 660 }
531 661
532 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); 662 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
533 nouveau_channel_put(&chan); 663 mutex_unlock(&chan->cli->mutex);
534 if (ret) 664 if (ret)
535 goto fail_unreserve; 665 goto fail_unreserve;
536 666
@@ -552,20 +682,21 @@ int
552nouveau_finish_page_flip(struct nouveau_channel *chan, 682nouveau_finish_page_flip(struct nouveau_channel *chan,
553 struct nouveau_page_flip_state *ps) 683 struct nouveau_page_flip_state *ps)
554{ 684{
555 struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW]; 685 struct nouveau_fence_chan *fctx = chan->fence;
556 struct drm_device *dev = chan->dev; 686 struct nouveau_drm *drm = chan->drm;
687 struct drm_device *dev = drm->dev;
557 struct nouveau_page_flip_state *s; 688 struct nouveau_page_flip_state *s;
558 unsigned long flags; 689 unsigned long flags;
559 690
560 spin_lock_irqsave(&dev->event_lock, flags); 691 spin_lock_irqsave(&dev->event_lock, flags);
561 692
562 if (list_empty(&swch->flip)) { 693 if (list_empty(&fctx->flip)) {
563 NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id); 694 NV_ERROR(drm, "unexpected pageflip\n");
564 spin_unlock_irqrestore(&dev->event_lock, flags); 695 spin_unlock_irqrestore(&dev->event_lock, flags);
565 return -EINVAL; 696 return -EINVAL;
566 } 697 }
567 698
568 s = list_first_entry(&swch->flip, struct nouveau_page_flip_state, head); 699 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
569 if (s->event) { 700 if (s->event) {
570 struct drm_pending_vblank_event *e = s->event; 701 struct drm_pending_vblank_event *e = s->event;
571 struct timeval now; 702 struct timeval now;
@@ -588,6 +719,24 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
588} 719}
589 720
590int 721int
722nouveau_flip_complete(void *data)
723{
724 struct nouveau_channel *chan = data;
725 struct nouveau_drm *drm = chan->drm;
726 struct nouveau_page_flip_state state;
727
728 if (!nouveau_finish_page_flip(chan, &state)) {
729 if (nv_device(drm->device)->card_type < NV_50) {
730 nv_set_crtc_base(drm->dev, state.crtc, state.offset +
731 state.y * state.pitch +
732 state.x * state.bpp / 8);
733 }
734 }
735
736 return 0;
737}
738
739int
591nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, 740nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
592 struct drm_mode_create_dumb *args) 741 struct drm_mode_create_dumb *args)
593{ 742{
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
new file mode 100644
index 000000000000..722548bb3bd3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -0,0 +1,94 @@
1#ifndef __NOUVEAU_DISPLAY_H__
2#define __NOUVEAU_DISPLAY_H__
3
4#include <subdev/vm.h>
5
6#include "nouveau_drm.h"
7
8struct nouveau_framebuffer {
9 struct drm_framebuffer base;
10 struct nouveau_bo *nvbo;
11 struct nouveau_vma vma;
12 u32 r_dma;
13 u32 r_format;
14 u32 r_pitch;
15};
16
17static inline struct nouveau_framebuffer *
18nouveau_framebuffer(struct drm_framebuffer *fb)
19{
20 return container_of(fb, struct nouveau_framebuffer, base);
21}
22
23int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
24 struct drm_mode_fb_cmd2 *, struct nouveau_bo *);
25
26struct nouveau_page_flip_state {
27 struct list_head head;
28 struct drm_pending_vblank_event *event;
29 int crtc, bpp, pitch, x, y;
30 u64 offset;
31};
32
33struct nouveau_display {
34 void *priv;
35 void (*dtor)(struct drm_device *);
36 int (*init)(struct drm_device *);
37 void (*fini)(struct drm_device *);
38
39 struct drm_property *dithering_mode;
40 struct drm_property *dithering_depth;
41 struct drm_property *underscan_property;
42 struct drm_property *underscan_hborder_property;
43 struct drm_property *underscan_vborder_property;
44 /* not really hue and saturation: */
45 struct drm_property *vibrant_hue_property;
46 struct drm_property *color_vibrance_property;
47};
48
49static inline struct nouveau_display *
50nouveau_display(struct drm_device *dev)
51{
52 return nouveau_drm(dev)->display;
53}
54
55int nouveau_display_create(struct drm_device *dev);
56void nouveau_display_destroy(struct drm_device *dev);
57int nouveau_display_init(struct drm_device *dev);
58void nouveau_display_fini(struct drm_device *dev);
59int nouveau_display_suspend(struct drm_device *dev);
60void nouveau_display_resume(struct drm_device *dev);
61
62int nouveau_vblank_enable(struct drm_device *dev, int crtc);
63void nouveau_vblank_disable(struct drm_device *dev, int crtc);
64
65int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
66 struct drm_pending_vblank_event *event);
67int nouveau_finish_page_flip(struct nouveau_channel *,
68 struct nouveau_page_flip_state *);
69
70int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
71 struct drm_mode_create_dumb *args);
72int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
73 u32 handle, u64 *offset);
74int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
75 u32 handle);
76
77void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
78
79#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
80extern int nouveau_backlight_init(struct drm_device *);
81extern void nouveau_backlight_exit(struct drm_device *);
82#else
83static inline int
84nouveau_backlight_init(struct drm_device *dev)
85{
86 return 0;
87}
88
89static inline void
90nouveau_backlight_exit(struct drm_device *dev) {
91}
92#endif
93
94#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 295932e66ac5..40f91e1e5842 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,41 +24,16 @@
24 * 24 *
25 */ 25 */
26 26
27#include "drmP.h" 27#include <core/client.h>
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_dma.h"
31#include "nouveau_ramht.h"
32
33void
34nouveau_dma_init(struct nouveau_channel *chan)
35{
36 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
37 struct nouveau_bo *pushbuf = chan->pushbuf_bo;
38
39 if (dev_priv->card_type >= NV_50) {
40 const int ib_size = pushbuf->bo.mem.size / 2;
41
42 chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
43 chan->dma.ib_max = (ib_size / 8) - 1;
44 chan->dma.ib_put = 0;
45 chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
46 28
47 chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2; 29#include "nouveau_drm.h"
48 } else { 30#include "nouveau_dma.h"
49 chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
50 }
51
52 chan->dma.put = 0;
53 chan->dma.cur = chan->dma.put;
54 chan->dma.free = chan->dma.max - chan->dma.cur;
55}
56 31
57void 32void
58OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords) 33OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
59{ 34{
60 bool is_iomem; 35 bool is_iomem;
61 u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem); 36 u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
62 mem = &mem[chan->dma.cur]; 37 mem = &mem[chan->dma.cur];
63 if (is_iomem) 38 if (is_iomem)
64 memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4); 39 memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
@@ -79,9 +54,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
79{ 54{
80 uint64_t val; 55 uint64_t val;
81 56
82 val = nvchan_rd32(chan, chan->user_get); 57 val = nv_ro32(chan->object, chan->user_get);
83 if (chan->user_get_hi) 58 if (chan->user_get_hi)
84 val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32; 59 val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;
85 60
86 /* reset counter as long as GET is still advancing, this is 61 /* reset counter as long as GET is still advancing, this is
87 * to avoid misdetecting a GPU lockup if the GPU happens to 62 * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -93,32 +68,33 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
93 } 68 }
94 69
95 if ((++*timeout & 0xff) == 0) { 70 if ((++*timeout & 0xff) == 0) {
96 DRM_UDELAY(1); 71 udelay(1);
97 if (*timeout > 100000) 72 if (*timeout > 100000)
98 return -EBUSY; 73 return -EBUSY;
99 } 74 }
100 75
101 if (val < chan->pushbuf_base || 76 if (val < chan->push.vma.offset ||
102 val > chan->pushbuf_base + (chan->dma.max << 2)) 77 val > chan->push.vma.offset + (chan->dma.max << 2))
103 return -EINVAL; 78 return -EINVAL;
104 79
105 return (val - chan->pushbuf_base) >> 2; 80 return (val - chan->push.vma.offset) >> 2;
106} 81}
107 82
108void 83void
109nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, 84nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
110 int delta, int length) 85 int delta, int length)
111{ 86{
112 struct nouveau_bo *pb = chan->pushbuf_bo; 87 struct nouveau_bo *pb = chan->push.buffer;
113 struct nouveau_vma *vma; 88 struct nouveau_vma *vma;
114 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; 89 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
115 u64 offset; 90 u64 offset;
116 91
117 vma = nouveau_bo_vma_find(bo, chan->vm); 92 vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
118 BUG_ON(!vma); 93 BUG_ON(!vma);
119 offset = vma->offset + delta; 94 offset = vma->offset + delta;
120 95
121 BUG_ON(chan->dma.ib_free < 1); 96 BUG_ON(chan->dma.ib_free < 1);
97
122 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); 98 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
123 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); 99 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
124 100
@@ -128,7 +104,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
128 /* Flush writes. */ 104 /* Flush writes. */
129 nouveau_bo_rd32(pb, 0); 105 nouveau_bo_rd32(pb, 0);
130 106
131 nvchan_wr32(chan, 0x8c, chan->dma.ib_put); 107 nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
132 chan->dma.ib_free--; 108 chan->dma.ib_free--;
133} 109}
134 110
@@ -138,7 +114,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
138 uint32_t cnt = 0, prev_get = 0; 114 uint32_t cnt = 0, prev_get = 0;
139 115
140 while (chan->dma.ib_free < count) { 116 while (chan->dma.ib_free < count) {
141 uint32_t get = nvchan_rd32(chan, 0x88); 117 uint32_t get = nv_ro32(chan->object, 0x88);
142 if (get != prev_get) { 118 if (get != prev_get) {
143 prev_get = get; 119 prev_get = get;
144 cnt = 0; 120 cnt = 0;
@@ -249,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
249 * instruct the GPU to jump back to the start right 225 * instruct the GPU to jump back to the start right
250 * after processing the currently pending commands. 226 * after processing the currently pending commands.
251 */ 227 */
252 OUT_RING(chan, chan->pushbuf_base | 0x20000000); 228 OUT_RING(chan, chan->push.vma.offset | 0x20000000);
253 229
254 /* wait for GET to depart from the skips area. 230 /* wait for GET to depart from the skips area.
255 * prevents writing GET==PUT and causing a race 231 * prevents writing GET==PUT and causing a race
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8db68be9544f..5c2e22932d1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -27,10 +27,10 @@
27#ifndef __NOUVEAU_DMA_H__ 27#ifndef __NOUVEAU_DMA_H__
28#define __NOUVEAU_DMA_H__ 28#define __NOUVEAU_DMA_H__
29 29
30#ifndef NOUVEAU_DMA_DEBUG 30#include "nouveau_bo.h"
31#define NOUVEAU_DMA_DEBUG 0 31#include "nouveau_chan.h"
32#endif
33 32
33int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *, 34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
35 int delta, int length); 35 int delta, int length);
36 36
@@ -116,12 +116,7 @@ RING_SPACE(struct nouveau_channel *chan, int size)
116static inline void 116static inline void
117OUT_RING(struct nouveau_channel *chan, int data) 117OUT_RING(struct nouveau_channel *chan, int data)
118{ 118{
119 if (NOUVEAU_DMA_DEBUG) { 119 nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data);
120 NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
121 chan->id, chan->dma.cur << 2, data);
122 }
123
124 nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
125} 120}
126 121
127extern void 122extern void
@@ -159,24 +154,19 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
159 154
160#define WRITE_PUT(val) do { \ 155#define WRITE_PUT(val) do { \
161 DRM_MEMORYBARRIER(); \ 156 DRM_MEMORYBARRIER(); \
162 nouveau_bo_rd32(chan->pushbuf_bo, 0); \ 157 nouveau_bo_rd32(chan->push.buffer, 0); \
163 nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \ 158 nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
164} while (0) 159} while (0)
165 160
166static inline void 161static inline void
167FIRE_RING(struct nouveau_channel *chan) 162FIRE_RING(struct nouveau_channel *chan)
168{ 163{
169 if (NOUVEAU_DMA_DEBUG) {
170 NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
171 chan->id, chan->dma.cur << 2);
172 }
173
174 if (chan->dma.cur == chan->dma.put) 164 if (chan->dma.cur == chan->dma.put)
175 return; 165 return;
176 chan->accel_done = true; 166 chan->accel_done = true;
177 167
178 if (chan->dma.ib_max) { 168 if (chan->dma.ib_max) {
179 nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2, 169 nv50_dma_push(chan, chan->push.buffer, chan->dma.put << 2,
180 (chan->dma.cur - chan->dma.put) << 2); 170 (chan->dma.cur - chan->dma.put) << 2);
181 } else { 171 } else {
182 WRITE_PUT(chan->dma.cur); 172 WRITE_PUT(chan->dma.cur);
@@ -191,4 +181,31 @@ WIND_RING(struct nouveau_channel *chan)
191 chan->dma.cur = chan->dma.put; 181 chan->dma.cur = chan->dma.put;
192} 182}
193 183
184/* FIFO methods */
185#define NV01_SUBCHAN_OBJECT 0x00000000
186#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH 0x00000010
187#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW 0x00000014
188#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE 0x00000018
189#define NV84_SUBCHAN_SEMAPHORE_TRIGGER 0x0000001c
190#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
191#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
192#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
193#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
194#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
195#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
196#define NV10_SUBCHAN_REF_CNT 0x00000050
197#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
198#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
199#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
200#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
201#define NV11_SUBCHAN_SEMAPHORE_RELEASE 0x0000006c
202#define NV40_SUBCHAN_YIELD 0x00000080
203
204/* NV_SW object class */
205#define NV_SW_DMA_VBLSEM 0x0000018c
206#define NV_SW_VBLSEM_OFFSET 0x00000400
207#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
208#define NV_SW_VBLSEM_RELEASE 0x00000408
209#define NV_SW_PAGE_FLIP 0x00000500
210
194#endif 211#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index e754aa32edf1..72266ae91fa1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -23,164 +23,37 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "drm_dp_helper.h"
26 27
27#include "nouveau_drv.h" 28#include "nouveau_drm.h"
28#include "nouveau_i2c.h"
29#include "nouveau_connector.h" 29#include "nouveau_connector.h"
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32#include "nouveau_gpio.h"
33 32
34/****************************************************************************** 33#include <subdev/gpio.h>
35 * aux channel util functions 34#include <subdev/i2c.h>
36 *****************************************************************************/
37#define AUX_DBG(fmt, args...) do { \
38 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_AUXCH) { \
39 NV_PRINTK(KERN_DEBUG, dev, "AUXCH(%d): " fmt, ch, ##args); \
40 } \
41} while (0)
42#define AUX_ERR(fmt, args...) NV_ERROR(dev, "AUXCH(%d): " fmt, ch, ##args)
43
44static void
45auxch_fini(struct drm_device *dev, int ch)
46{
47 nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
48}
49
50static int
51auxch_init(struct drm_device *dev, int ch)
52{
53 const u32 unksel = 1; /* nfi which to use, or if it matters.. */
54 const u32 ureq = unksel ? 0x00100000 : 0x00200000;
55 const u32 urep = unksel ? 0x01000000 : 0x02000000;
56 u32 ctrl, timeout;
57
58 /* wait up to 1ms for any previous transaction to be done... */
59 timeout = 1000;
60 do {
61 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
62 udelay(1);
63 if (!timeout--) {
64 AUX_ERR("begin idle timeout 0x%08x", ctrl);
65 return -EBUSY;
66 }
67 } while (ctrl & 0x03010000);
68
69 /* set some magic, and wait up to 1ms for it to appear */
70 nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
71 timeout = 1000;
72 do {
73 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
74 udelay(1);
75 if (!timeout--) {
76 AUX_ERR("magic wait 0x%08x\n", ctrl);
77 auxch_fini(dev, ch);
78 return -EBUSY;
79 }
80 } while ((ctrl & 0x03000000) != urep);
81
82 return 0;
83}
84
85static int
86auxch_tx(struct drm_device *dev, int ch, u8 type, u32 addr, u8 *data, u8 size)
87{
88 u32 ctrl, stat, timeout, retries;
89 u32 xbuf[4] = {};
90 int ret, i;
91
92 AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
93
94 ret = auxch_init(dev, ch);
95 if (ret)
96 goto out;
97
98 stat = nv_rd32(dev, 0x00e4e8 + (ch * 0x50));
99 if (!(stat & 0x10000000)) {
100 AUX_DBG("sink not detected\n");
101 ret = -ENXIO;
102 goto out;
103 }
104
105 if (!(type & 1)) {
106 memcpy(xbuf, data, size);
107 for (i = 0; i < 16; i += 4) {
108 AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
109 nv_wr32(dev, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
110 }
111 }
112
113 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
114 ctrl &= ~0x0001f0ff;
115 ctrl |= type << 12;
116 ctrl |= size - 1;
117 nv_wr32(dev, 0x00e4e0 + (ch * 0x50), addr);
118
119 /* retry transaction a number of times on failure... */
120 ret = -EREMOTEIO;
121 for (retries = 0; retries < 32; retries++) {
122 /* reset, and delay a while if this is a retry */
123 nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
124 nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
125 if (retries)
126 udelay(400);
127
128 /* transaction request, wait up to 1ms for it to complete */
129 nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
130
131 timeout = 1000;
132 do {
133 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
134 udelay(1);
135 if (!timeout--) {
136 AUX_ERR("tx req timeout 0x%08x\n", ctrl);
137 goto out;
138 }
139 } while (ctrl & 0x00010000);
140
141 /* read status, and check if transaction completed ok */
142 stat = nv_mask(dev, 0x00e4e8 + (ch * 0x50), 0, 0);
143 if (!(stat & 0x000f0f00)) {
144 ret = 0;
145 break;
146 }
147
148 AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
149 }
150
151 if (type & 1) {
152 for (i = 0; i < 16; i += 4) {
153 xbuf[i / 4] = nv_rd32(dev, 0x00e4d0 + (ch * 0x50) + i);
154 AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
155 }
156 memcpy(data, xbuf, size);
157 }
158
159out:
160 auxch_fini(dev, ch);
161 return ret;
162}
163 35
164u8 * 36u8 *
165nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry) 37nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
166{ 38{
39 struct nouveau_drm *drm = nouveau_drm(dev);
167 struct bit_entry d; 40 struct bit_entry d;
168 u8 *table; 41 u8 *table;
169 int i; 42 int i;
170 43
171 if (bit_table(dev, 'd', &d)) { 44 if (bit_table(dev, 'd', &d)) {
172 NV_ERROR(dev, "BIT 'd' table not found\n"); 45 NV_ERROR(drm, "BIT 'd' table not found\n");
173 return NULL; 46 return NULL;
174 } 47 }
175 48
176 if (d.version != 1) { 49 if (d.version != 1) {
177 NV_ERROR(dev, "BIT 'd' table version %d unknown\n", d.version); 50 NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
178 return NULL; 51 return NULL;
179 } 52 }
180 53
181 table = ROMPTR(dev, d.data[0]); 54 table = ROMPTR(dev, d.data[0]);
182 if (!table) { 55 if (!table) {
183 NV_ERROR(dev, "displayport table pointer invalid\n"); 56 NV_ERROR(drm, "displayport table pointer invalid\n");
184 return NULL; 57 return NULL;
185 } 58 }
186 59
@@ -191,7 +64,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
191 case 0x40: 64 case 0x40:
192 break; 65 break;
193 default: 66 default:
194 NV_ERROR(dev, "displayport table 0x%02x unknown\n", table[0]); 67 NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
195 return NULL; 68 return NULL;
196 } 69 }
197 70
@@ -201,7 +74,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
201 return table; 74 return table;
202 } 75 }
203 76
204 NV_ERROR(dev, "displayport encoder table not found\n"); 77 NV_ERROR(drm, "displayport encoder table not found\n");
205 return NULL; 78 return NULL;
206} 79}
207 80
@@ -209,9 +82,9 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
209 * link training 82 * link training
210 *****************************************************************************/ 83 *****************************************************************************/
211struct dp_state { 84struct dp_state {
85 struct nouveau_i2c_port *auxch;
212 struct dp_train_func *func; 86 struct dp_train_func *func;
213 struct dcb_entry *dcb; 87 struct dcb_output *dcb;
214 int auxch;
215 int crtc; 88 int crtc;
216 u8 *dpcd; 89 u8 *dpcd;
217 int link_nr; 90 int link_nr;
@@ -223,9 +96,10 @@ struct dp_state {
223static void 96static void
224dp_set_link_config(struct drm_device *dev, struct dp_state *dp) 97dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
225{ 98{
99 struct nouveau_drm *drm = nouveau_drm(dev);
226 u8 sink[2]; 100 u8 sink[2];
227 101
228 NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); 102 NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
229 103
230 /* set desired link configuration on the source */ 104 /* set desired link configuration on the source */
231 dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw, 105 dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
@@ -237,27 +111,29 @@ dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
237 if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) 111 if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
238 sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 112 sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
239 113
240 auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2); 114 nv_wraux(dp->auxch, DP_LINK_BW_SET, sink, 2);
241} 115}
242 116
243static void 117static void
244dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern) 118dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
245{ 119{
120 struct nouveau_drm *drm = nouveau_drm(dev);
246 u8 sink_tp; 121 u8 sink_tp;
247 122
248 NV_DEBUG_KMS(dev, "training pattern %d\n", pattern); 123 NV_DEBUG(drm, "training pattern %d\n", pattern);
249 124
250 dp->func->train_set(dev, dp->dcb, pattern); 125 dp->func->train_set(dev, dp->dcb, pattern);
251 126
252 auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1); 127 nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
253 sink_tp &= ~DP_TRAINING_PATTERN_MASK; 128 sink_tp &= ~DP_TRAINING_PATTERN_MASK;
254 sink_tp |= pattern; 129 sink_tp |= pattern;
255 auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1); 130 nv_wraux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
256} 131}
257 132
258static int 133static int
259dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) 134dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
260{ 135{
136 struct nouveau_drm *drm = nouveau_drm(dev);
261 int i; 137 int i;
262 138
263 for (i = 0; i < dp->link_nr; i++) { 139 for (i = 0; i < dp->link_nr; i++) {
@@ -271,25 +147,26 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
271 if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5) 147 if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5)
272 dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 148 dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
273 149
274 NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]); 150 NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
275 dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre); 151 dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
276 } 152 }
277 153
278 return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4); 154 return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
279} 155}
280 156
281static int 157static int
282dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay) 158dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
283{ 159{
160 struct nouveau_drm *drm = nouveau_drm(dev);
284 int ret; 161 int ret;
285 162
286 udelay(delay); 163 udelay(delay);
287 164
288 ret = auxch_tx(dev, dp->auxch, 9, DP_LANE0_1_STATUS, dp->stat, 6); 165 ret = nv_rdaux(dp->auxch, DP_LANE0_1_STATUS, dp->stat, 6);
289 if (ret) 166 if (ret)
290 return ret; 167 return ret;
291 168
292 NV_DEBUG_KMS(dev, "status %*ph\n", 6, dp->stat); 169 NV_DEBUG(drm, "status %*ph\n", 6, dp->stat);
293 return 0; 170 return 0;
294} 171}
295 172
@@ -407,7 +284,7 @@ dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
407 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); 284 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
408} 285}
409 286
410bool 287static bool
411nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, 288nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
412 struct dp_train_func *func) 289 struct dp_train_func *func)
413{ 290{
@@ -416,19 +293,20 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
416 struct nouveau_connector *nv_connector = 293 struct nouveau_connector *nv_connector =
417 nouveau_encoder_connector_get(nv_encoder); 294 nouveau_encoder_connector_get(nv_encoder);
418 struct drm_device *dev = encoder->dev; 295 struct drm_device *dev = encoder->dev;
419 struct nouveau_i2c_chan *auxch; 296 struct nouveau_drm *drm = nouveau_drm(dev);
297 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
298 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
420 const u32 bw_list[] = { 270000, 162000, 0 }; 299 const u32 bw_list[] = { 270000, 162000, 0 };
421 const u32 *link_bw = bw_list; 300 const u32 *link_bw = bw_list;
422 struct dp_state dp; 301 struct dp_state dp;
423 302
424 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); 303 dp.auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
425 if (!auxch) 304 if (!dp.auxch)
426 return false; 305 return false;
427 306
428 dp.func = func; 307 dp.func = func;
429 dp.dcb = nv_encoder->dcb; 308 dp.dcb = nv_encoder->dcb;
430 dp.crtc = nv_crtc->index; 309 dp.crtc = nv_crtc->index;
431 dp.auxch = auxch->drive;
432 dp.dpcd = nv_encoder->dp.dpcd; 310 dp.dpcd = nv_encoder->dp.dpcd;
433 311
434 /* adjust required bandwidth for 8B/10B coding overhead */ 312 /* adjust required bandwidth for 8B/10B coding overhead */
@@ -438,7 +316,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
438 * we take during link training (DP_SET_POWER is one), we need 316 * we take during link training (DP_SET_POWER is one), we need
439 * to ignore them for the moment to avoid races. 317 * to ignore them for the moment to avoid races.
440 */ 318 */
441 nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false); 319 gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
442 320
443 /* enable down-spreading, if possible */ 321 /* enable down-spreading, if possible */
444 dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1); 322 dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
@@ -481,7 +359,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
481 dp_link_train_fini(dev, &dp); 359 dp_link_train_fini(dev, &dp);
482 360
483 /* re-enable hotplug detect */ 361 /* re-enable hotplug detect */
484 nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true); 362 gpio->irq(gpio, 0, nv_connector->hpd, 0xff, true);
485 return true; 363 return true;
486} 364}
487 365
@@ -490,10 +368,12 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
490 struct dp_train_func *func) 368 struct dp_train_func *func)
491{ 369{
492 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 370 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
493 struct nouveau_i2c_chan *auxch; 371 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
372 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
373 struct nouveau_i2c_port *auxch;
494 u8 status; 374 u8 status;
495 375
496 auxch = nouveau_i2c_find(encoder->dev, nv_encoder->dcb->i2c_index); 376 auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
497 if (!auxch) 377 if (!auxch)
498 return; 378 return;
499 379
@@ -502,27 +382,28 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
502 else 382 else
503 status = DP_SET_POWER_D3; 383 status = DP_SET_POWER_D3;
504 384
505 nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); 385 nv_wraux(auxch, DP_SET_POWER, &status, 1);
506 386
507 if (mode == DRM_MODE_DPMS_ON) 387 if (mode == DRM_MODE_DPMS_ON)
508 nouveau_dp_link_train(encoder, datarate, func); 388 nouveau_dp_link_train(encoder, datarate, func);
509} 389}
510 390
511static void 391static void
512nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_chan *auxch, 392nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
513 u8 *dpcd) 393 u8 *dpcd)
514{ 394{
395 struct nouveau_drm *drm = nouveau_drm(dev);
515 u8 buf[3]; 396 u8 buf[3];
516 397
517 if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 398 if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
518 return; 399 return;
519 400
520 if (!auxch_tx(dev, auxch->drive, 9, DP_SINK_OUI, buf, 3)) 401 if (!nv_rdaux(auxch, DP_SINK_OUI, buf, 3))
521 NV_DEBUG_KMS(dev, "Sink OUI: %02hx%02hx%02hx\n", 402 NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n",
522 buf[0], buf[1], buf[2]); 403 buf[0], buf[1], buf[2]);
523 404
524 if (!auxch_tx(dev, auxch->drive, 9, DP_BRANCH_OUI, buf, 3)) 405 if (!nv_rdaux(auxch, DP_BRANCH_OUI, buf, 3))
525 NV_DEBUG_KMS(dev, "Branch OUI: %02hx%02hx%02hx\n", 406 NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n",
526 buf[0], buf[1], buf[2]); 407 buf[0], buf[1], buf[2]);
527 408
528} 409}
@@ -532,24 +413,26 @@ nouveau_dp_detect(struct drm_encoder *encoder)
532{ 413{
533 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 414 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
534 struct drm_device *dev = encoder->dev; 415 struct drm_device *dev = encoder->dev;
535 struct nouveau_i2c_chan *auxch; 416 struct nouveau_drm *drm = nouveau_drm(dev);
417 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
418 struct nouveau_i2c_port *auxch;
536 u8 *dpcd = nv_encoder->dp.dpcd; 419 u8 *dpcd = nv_encoder->dp.dpcd;
537 int ret; 420 int ret;
538 421
539 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); 422 auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
540 if (!auxch) 423 if (!auxch)
541 return false; 424 return false;
542 425
543 ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8); 426 ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
544 if (ret) 427 if (ret)
545 return false; 428 return false;
546 429
547 nv_encoder->dp.link_bw = 27000 * dpcd[1]; 430 nv_encoder->dp.link_bw = 27000 * dpcd[1];
548 nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK; 431 nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
549 432
550 NV_DEBUG_KMS(dev, "display: %dx%d dpcd 0x%02x\n", 433 NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n",
551 nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]); 434 nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]);
552 NV_DEBUG_KMS(dev, "encoder: %dx%d\n", 435 NV_DEBUG(drm, "encoder: %dx%d\n",
553 nv_encoder->dcb->dpconf.link_nr, 436 nv_encoder->dcb->dpconf.link_nr,
554 nv_encoder->dcb->dpconf.link_bw); 437 nv_encoder->dcb->dpconf.link_bw);
555 438
@@ -558,65 +441,10 @@ nouveau_dp_detect(struct drm_encoder *encoder)
558 if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw) 441 if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw)
559 nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw; 442 nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;
560 443
561 NV_DEBUG_KMS(dev, "maximum: %dx%d\n", 444 NV_DEBUG(drm, "maximum: %dx%d\n",
562 nv_encoder->dp.link_nr, nv_encoder->dp.link_bw); 445 nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
563 446
564 nouveau_dp_probe_oui(dev, auxch, dpcd); 447 nouveau_dp_probe_oui(dev, auxch, dpcd);
565 448
566 return true; 449 return true;
567} 450}
568
569int
570nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
571 uint8_t *data, int data_nr)
572{
573 return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
574}
575
576static int
577nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
578{
579 struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap;
580 struct i2c_msg *msg = msgs;
581 int ret, mcnt = num;
582
583 while (mcnt--) {
584 u8 remaining = msg->len;
585 u8 *ptr = msg->buf;
586
587 while (remaining) {
588 u8 cnt = (remaining > 16) ? 16 : remaining;
589 u8 cmd;
590
591 if (msg->flags & I2C_M_RD)
592 cmd = AUX_I2C_READ;
593 else
594 cmd = AUX_I2C_WRITE;
595
596 if (mcnt || remaining > 16)
597 cmd |= AUX_I2C_MOT;
598
599 ret = nouveau_dp_auxch(auxch, cmd, msg->addr, ptr, cnt);
600 if (ret < 0)
601 return ret;
602
603 ptr += cnt;
604 remaining -= cnt;
605 }
606
607 msg++;
608 }
609
610 return num;
611}
612
613static u32
614nouveau_dp_i2c_func(struct i2c_adapter *adap)
615{
616 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
617}
618
619const struct i2c_algorithm nouveau_dp_i2c_algo = {
620 .master_xfer = nouveau_dp_i2c_xfer,
621 .functionality = nouveau_dp_i2c_func
622};
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
new file mode 100644
index 000000000000..e96507e11488
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -0,0 +1,693 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/console.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include <core/device.h>
30#include <core/client.h>
31#include <core/gpuobj.h>
32#include <core/class.h>
33
34#include <subdev/device.h>
35#include <subdev/vm.h>
36
37#include "nouveau_drm.h"
38#include "nouveau_irq.h"
39#include "nouveau_dma.h"
40#include "nouveau_ttm.h"
41#include "nouveau_gem.h"
42#include "nouveau_agp.h"
43#include "nouveau_vga.h"
44#include "nouveau_pm.h"
45#include "nouveau_acpi.h"
46#include "nouveau_bios.h"
47#include "nouveau_ioctl.h"
48#include "nouveau_abi16.h"
49#include "nouveau_fbcon.h"
50#include "nouveau_fence.h"
51
52#include "nouveau_ttm.h"
53
54MODULE_PARM_DESC(config, "option string to pass to driver core");
55static char *nouveau_config;
56module_param_named(config, nouveau_config, charp, 0400);
57
58MODULE_PARM_DESC(debug, "debug string to pass to driver core");
59static char *nouveau_debug;
60module_param_named(debug, nouveau_debug, charp, 0400);
61
62MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
63static int nouveau_noaccel = 0;
64module_param_named(noaccel, nouveau_noaccel, int, 0400);
65
66MODULE_PARM_DESC(modeset, "enable driver");
67static int nouveau_modeset = -1;
68module_param_named(modeset, nouveau_modeset, int, 0400);
69
70static struct drm_driver driver;
71
72static u64
73nouveau_name(struct pci_dev *pdev)
74{
75 u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
76 name |= pdev->bus->number << 16;
77 name |= PCI_SLOT(pdev->devfn) << 8;
78 return name | PCI_FUNC(pdev->devfn);
79}
80
81static int
82nouveau_cli_create(struct pci_dev *pdev, const char *name,
83 int size, void **pcli)
84{
85 struct nouveau_cli *cli;
86 int ret;
87
88 ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
89 nouveau_debug, size, pcli);
90 cli = *pcli;
91 if (ret)
92 return ret;
93
94 mutex_init(&cli->mutex);
95 return 0;
96}
97
98static void
99nouveau_cli_destroy(struct nouveau_cli *cli)
100{
101 struct nouveau_object *client = nv_object(cli);
102 nouveau_vm_ref(NULL, &cli->base.vm, NULL);
103 nouveau_client_fini(&cli->base, false);
104 atomic_set(&client->refcount, 1);
105 nouveau_object_ref(NULL, &client);
106}
107
108static void
109nouveau_accel_fini(struct nouveau_drm *drm)
110{
111 nouveau_gpuobj_ref(NULL, &drm->notify);
112 nouveau_channel_del(&drm->channel);
113 nouveau_channel_del(&drm->cechan);
114 if (drm->fence)
115 nouveau_fence(drm)->dtor(drm);
116}
117
118static void
119nouveau_accel_init(struct nouveau_drm *drm)
120{
121 struct nouveau_device *device = nv_device(drm->device);
122 struct nouveau_object *object;
123 u32 arg0, arg1;
124 int ret;
125
126 if (nouveau_noaccel)
127 return;
128
129 /* initialise synchronisation routines */
130 if (device->card_type < NV_10) ret = nv04_fence_create(drm);
131 else if (device->chipset < 0x84) ret = nv10_fence_create(drm);
132 else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
133 else ret = nvc0_fence_create(drm);
134 if (ret) {
135 NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
136 nouveau_accel_fini(drm);
137 return;
138 }
139
140 if (device->card_type >= NV_E0) {
141 ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
142 NVDRM_CHAN + 1,
143 NVE0_CHANNEL_IND_ENGINE_CE0 |
144 NVE0_CHANNEL_IND_ENGINE_CE1, 0,
145 &drm->cechan);
146 if (ret)
147 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
148
149 arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
150 arg1 = 0;
151 } else {
152 arg0 = NvDmaFB;
153 arg1 = NvDmaTT;
154 }
155
156 ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
157 arg0, arg1, &drm->channel);
158 if (ret) {
159 NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
160 nouveau_accel_fini(drm);
161 return;
162 }
163
164 if (device->card_type < NV_C0) {
165 ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
166 &drm->notify);
167 if (ret) {
168 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
169 nouveau_accel_fini(drm);
170 return;
171 }
172
173 ret = nouveau_object_new(nv_object(drm),
174 drm->channel->handle, NvNotify0,
175 0x003d, &(struct nv_dma_class) {
176 .flags = NV_DMA_TARGET_VRAM |
177 NV_DMA_ACCESS_RDWR,
178 .start = drm->notify->addr,
179 .limit = drm->notify->addr + 31
180 }, sizeof(struct nv_dma_class),
181 &object);
182 if (ret) {
183 nouveau_accel_fini(drm);
184 return;
185 }
186 }
187
188
189 nouveau_bo_move_init(drm);
190}
191
192static int __devinit
193nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
194{
195 struct nouveau_device *device;
196 struct apertures_struct *aper;
197 bool boot = false;
198 int ret;
199
200 /* remove conflicting drivers (vesafb, efifb etc) */
201 aper = alloc_apertures(3);
202 if (!aper)
203 return -ENOMEM;
204
205 aper->ranges[0].base = pci_resource_start(pdev, 1);
206 aper->ranges[0].size = pci_resource_len(pdev, 1);
207 aper->count = 1;
208
209 if (pci_resource_len(pdev, 2)) {
210 aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
211 aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
212 aper->count++;
213 }
214
215 if (pci_resource_len(pdev, 3)) {
216 aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
217 aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
218 aper->count++;
219 }
220
221#ifdef CONFIG_X86
222 boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
223#endif
224 remove_conflicting_framebuffers(aper, "nouveaufb", boot);
225
226 ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
227 nouveau_config, nouveau_debug, &device);
228 if (ret)
229 return ret;
230
231 pci_set_master(pdev);
232
233 ret = drm_get_pci_dev(pdev, pent, &driver);
234 if (ret) {
235 nouveau_object_ref(NULL, (struct nouveau_object **)&device);
236 return ret;
237 }
238
239 return 0;
240}
241
242static int
243nouveau_drm_load(struct drm_device *dev, unsigned long flags)
244{
245 struct pci_dev *pdev = dev->pdev;
246 struct nouveau_device *device;
247 struct nouveau_drm *drm;
248 int ret;
249
250 ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
251 if (ret)
252 return ret;
253
254 dev->dev_private = drm;
255 drm->dev = dev;
256
257 INIT_LIST_HEAD(&drm->clients);
258 spin_lock_init(&drm->tile.lock);
259
260 /* make sure AGP controller is in a consistent state before we
261 * (possibly) execute vbios init tables (see nouveau_agp.h)
262 */
263 if (drm_pci_device_is_agp(dev) && dev->agp) {
264 /* dummy device object, doesn't init anything, but allows
265 * agp code access to registers
266 */
267 ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT,
268 NVDRM_DEVICE, 0x0080,
269 &(struct nv_device_class) {
270 .device = ~0,
271 .disable =
272 ~(NV_DEVICE_DISABLE_MMIO |
273 NV_DEVICE_DISABLE_IDENTIFY),
274 .debug0 = ~0,
275 }, sizeof(struct nv_device_class),
276 &drm->device);
277 if (ret)
278 goto fail_device;
279
280 nouveau_agp_reset(drm);
281 nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE);
282 }
283
284 ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE,
285 0x0080, &(struct nv_device_class) {
286 .device = ~0,
287 .disable = 0,
288 .debug0 = 0,
289 }, sizeof(struct nv_device_class),
290 &drm->device);
291 if (ret)
292 goto fail_device;
293
294 /* workaround an odd issue on nvc1 by disabling the device's
295 * nosnoop capability. hopefully won't cause issues until a
296 * better fix is found - assuming there is one...
297 */
298 device = nv_device(drm->device);
299 if (nv_device(drm->device)->chipset == 0xc1)
300 nv_mask(device, 0x00088080, 0x00000800, 0x00000000);
301
302 nouveau_vga_init(drm);
303 nouveau_agp_init(drm);
304
305 if (device->card_type >= NV_50) {
306 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
307 0x1000, &drm->client.base.vm);
308 if (ret)
309 goto fail_device;
310 }
311
312 ret = nouveau_ttm_init(drm);
313 if (ret)
314 goto fail_ttm;
315
316 ret = nouveau_bios_init(dev);
317 if (ret)
318 goto fail_bios;
319
320 ret = nouveau_irq_init(dev);
321 if (ret)
322 goto fail_irq;
323
324 ret = nouveau_display_create(dev);
325 if (ret)
326 goto fail_dispctor;
327
328 if (dev->mode_config.num_crtc) {
329 ret = nouveau_display_init(dev);
330 if (ret)
331 goto fail_dispinit;
332 }
333
334 nouveau_pm_init(dev);
335
336 nouveau_accel_init(drm);
337 nouveau_fbcon_init(dev);
338 return 0;
339
340fail_dispinit:
341 nouveau_display_destroy(dev);
342fail_dispctor:
343 nouveau_irq_fini(dev);
344fail_irq:
345 nouveau_bios_takedown(dev);
346fail_bios:
347 nouveau_ttm_fini(drm);
348fail_ttm:
349 nouveau_agp_fini(drm);
350 nouveau_vga_fini(drm);
351fail_device:
352 nouveau_cli_destroy(&drm->client);
353 return ret;
354}
355
356static int
357nouveau_drm_unload(struct drm_device *dev)
358{
359 struct nouveau_drm *drm = nouveau_drm(dev);
360
361 nouveau_fbcon_fini(dev);
362 nouveau_accel_fini(drm);
363
364 nouveau_pm_fini(dev);
365
366 nouveau_display_fini(dev);
367 nouveau_display_destroy(dev);
368
369 nouveau_irq_fini(dev);
370 nouveau_bios_takedown(dev);
371
372 nouveau_ttm_fini(drm);
373 nouveau_agp_fini(drm);
374 nouveau_vga_fini(drm);
375
376 nouveau_cli_destroy(&drm->client);
377 return 0;
378}
379
380static void
381nouveau_drm_remove(struct pci_dev *pdev)
382{
383 struct drm_device *dev = pci_get_drvdata(pdev);
384 struct nouveau_drm *drm = nouveau_drm(dev);
385 struct nouveau_object *device;
386
387 device = drm->client.base.device;
388 drm_put_dev(dev);
389
390 nouveau_object_ref(NULL, &device);
391 nouveau_object_debug();
392}
393
/* System suspend handler (also used by the vga-switcheroo path via the
 * exported prototype in nouveau_drm.h).  Returns 0 on success or a
 * negative error code; on failure, already-suspended clients and the
 * display are rolled back. */
int
nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli;
	int ret;

	/* Nothing to do if the GPU is already powered down by switcheroo,
	 * or for the PRETHAW phase of hibernation. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
	    pm_state.event == PM_EVENT_PRETHAW)
		return 0;

	NV_INFO(drm, "suspending fbcon...\n");
	nouveau_fbcon_set_suspend(dev, 1);

	NV_INFO(drm, "suspending display...\n");
	ret = nouveau_display_suspend(dev);
	if (ret)
		return ret;

	/* Push all pinned buffer objects out of VRAM so their contents
	 * survive the power-down. */
	NV_INFO(drm, "evicting buffers...\n");
	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);

	/* Fence suspend hook returns a boolean-style success flag, not an
	 * errno, hence the -ENOMEM translation. */
	if (drm->fence && nouveau_fence(drm)->suspend) {
		if (!nouveau_fence(drm)->suspend(drm))
			return -ENOMEM;
	}

	NV_INFO(drm, "suspending client object trees...\n");
	list_for_each_entry(cli, &drm->clients, head) {
		ret = nouveau_client_fini(&cli->base, true);
		if (ret)
			goto fail_client;
	}

	/* The drm-internal master client goes last. */
	ret = nouveau_client_fini(&drm->client.base, true);
	if (ret)
		goto fail_client;

	nouveau_agp_fini(drm);

	pci_save_state(pdev);
	if (pm_state.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;

fail_client:
	/* Re-init only the clients that were successfully fini'd before
	 * the failure (walks backwards from the failing entry). */
	list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
		nouveau_client_init(&cli->base);
	}

	NV_INFO(drm, "resuming display...\n");
	nouveau_display_resume(dev);
	return ret;
}
452
/* System resume handler: mirror image of nouveau_drm_suspend().
 * Returns 0 on success or a negative error code from PCI re-enable. */
int
nouveau_drm_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli;
	int ret;

	/* GPU powered off by vga-switcheroo: nothing to restore. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	NV_INFO(drm, "re-enabling device...\n");
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_set_master(pdev);

	nouveau_agp_reset(drm);

	/* Master client first, then AGP, then the per-file clients.
	 * NOTE(review): nouveau_client_init() return values are ignored
	 * here — confirm that is intentional on the resume path. */
	NV_INFO(drm, "resuming client object trees...\n");
	nouveau_client_init(&drm->client.base);
	nouveau_agp_init(drm);

	list_for_each_entry(cli, &drm->clients, head) {
		nouveau_client_init(&cli->base);
	}

	if (drm->fence && nouveau_fence(drm)->resume)
		nouveau_fence(drm)->resume(drm);

	/* Re-POST the card and bring interrupts / power management back. */
	nouveau_run_vbios_init(dev);
	nouveau_irq_postinstall(dev);
	nouveau_pm_resume(dev);

	NV_INFO(drm, "resuming display...\n");
	nouveau_display_resume(dev);
	return 0;
}
493
/* Per-file open: create a nouveau client for this fd and register it on
 * drm->clients.  Returns 0 or a negative error code. */
static int
nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
	struct pci_dev *pdev = dev->pdev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli;
	char name[16];
	int ret;

	/* Name the client after the opening process' pid (debug aid). */
	snprintf(name, sizeof(name), "%d", fpriv->pid);

	ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
	if (ret)
		return ret;

	/* NV50 and newer have per-client virtual address spaces; give the
	 * client a 1<<40-byte VM with 0x1000-byte pages. */
	if (nv_device(drm->device)->card_type >= NV_50) {
		ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
				     0x1000, &cli->base.vm);
		if (ret) {
			nouveau_cli_destroy(cli);
			return ret;
		}
	}

	fpriv->driver_priv = cli;

	/* Client list is guarded by the master client's mutex. */
	mutex_lock(&drm->client.mutex);
	list_add(&cli->head, &drm->clients);
	mutex_unlock(&drm->client.mutex);
	return 0;
}
525
/* Per-file preclose: tear down abi16 state and unlink the client while
 * the file is still valid; the client itself is freed in postclose. */
static void
nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
{
	struct nouveau_cli *cli = nouveau_cli(fpriv);
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (cli->abi16)
		nouveau_abi16_fini(cli->abi16);

	mutex_lock(&drm->client.mutex);
	list_del(&cli->head);
	mutex_unlock(&drm->client.mutex);
}
539
/* Per-file postclose: free the client allocated in nouveau_drm_open();
 * it was already unlinked from drm->clients in preclose. */
static void
nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
{
	nouveau_cli_destroy(nouveau_cli(fpriv));
}
546
/* Nouveau-private ioctl table: legacy abi16 channel/object management
 * plus the GEM buffer-object interface.  All require DRM_AUTH;
 * SETPARAM is additionally restricted to the master/root. */
static struct drm_ioctl_desc
nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};
562
/* file_operations for /dev/dri/cardN: mostly the generic drm helpers,
 * with a nouveau-specific mmap (TTM-backed) and 32-bit compat ioctl. */
static const struct file_operations
nouveau_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = nouveau_ttm_mmap,
	.poll = drm_poll,
	.fasync = drm_fasync,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = nouveau_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
578
/* Main drm_driver description.  num_ioctls is filled in at module init
 * time (see nouveau_drm_init()). */
static struct drm_driver
driver = {
	.driver_features =
		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
		DRIVER_MODESET | DRIVER_PRIME,

	/* device and per-file lifecycle */
	.load = nouveau_drm_load,
	.unload = nouveau_drm_unload,
	.open = nouveau_drm_open,
	.preclose = nouveau_drm_preclose,
	.postclose = nouveau_drm_postclose,
	.lastclose = nouveau_vga_lastclose,

	/* interrupt handling */
	.irq_preinstall = nouveau_irq_preinstall,
	.irq_postinstall = nouveau_irq_postinstall,
	.irq_uninstall = nouveau_irq_uninstall,
	.irq_handler = nouveau_irq_handler,

	/* vblank accounting */
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = nouveau_vblank_enable,
	.disable_vblank = nouveau_vblank_disable,

	.ioctls = nouveau_ioctls,
	.fops = &nouveau_driver_fops,

	/* PRIME buffer sharing */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = nouveau_gem_prime_export,
	.gem_prime_import = nouveau_gem_prime_import,

	/* GEM object lifecycle */
	.gem_init_object = nouveau_gem_object_new,
	.gem_free_object = nouveau_gem_object_del,
	.gem_open_object = nouveau_gem_object_open,
	.gem_close_object = nouveau_gem_object_close,

	/* dumb-buffer interface for generic KMS clients */
	.dumb_create = nouveau_display_dumb_create,
	.dumb_map_offset = nouveau_display_dumb_map_offset,
	.dumb_destroy = nouveau_display_dumb_destroy,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
#ifdef GIT_REVISION
	.date = GIT_REVISION,
#else
	.date = DRIVER_DATE,
#endif
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
630
/* Match any NVIDIA (or legacy NVIDIA/SGS) PCI device whose base class
 * is "display controller"; exact chipset detection happens later. */
static struct pci_device_id
nouveau_drm_pci_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask  = 0xff << 16,
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
		.class = PCI_BASE_CLASS_DISPLAY << 16,
		.class_mask  = 0xff << 16,
	},
	{}
};
645
/* PCI driver glue; legacy (non-dev_pm_ops) suspend/resume callbacks. */
static struct pci_driver
nouveau_drm_pci_driver = {
	.name = "nouveau",
	.id_table = nouveau_drm_pci_table,
	.probe = nouveau_drm_probe,
	.remove = nouveau_drm_remove,
	.suspend = nouveau_drm_suspend,
	.resume = nouveau_drm_resume,
};
655
/* Module init: resolve the modeset= parameter (default: on, unless the
 * VGA console was forced), then register with the DRM/PCI core. */
static int __init
nouveau_drm_init(void)
{
	driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);

	if (nouveau_modeset == -1) {
#ifdef CONFIG_VGA_CONSOLE
		if (vgacon_text_force())
			nouveau_modeset = 0;
		else
#endif
			nouveau_modeset = 1;
	}

	/* modeset=0: load as a no-op so another driver can claim the hw. */
	if (!nouveau_modeset)
		return 0;

	nouveau_register_dsm_handler();
	return drm_pci_init(&driver, &nouveau_drm_pci_driver);
}
676
677static void __exit
678nouveau_drm_exit(void)
679{
680 if (!nouveau_modeset)
681 return;
682
683 drm_pci_exit(&driver, &nouveau_drm_pci_driver);
684 nouveau_unregister_dsm_handler();
685}
686
687module_init(nouveau_drm_init);
688module_exit(nouveau_drm_exit);
689
690MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table);
691MODULE_AUTHOR(DRIVER_AUTHOR);
692MODULE_DESCRIPTION(DRIVER_DESC);
693MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
new file mode 100644
index 000000000000..3c12e9862e37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -0,0 +1,144 @@
1#ifndef __NOUVEAU_DRMCLI_H__
2#define __NOUVEAU_DRMCLI_H__
3
4#define DRIVER_AUTHOR "Nouveau Project"
5#define DRIVER_EMAIL "nouveau@lists.freedesktop.org"
6
7#define DRIVER_NAME "nouveau"
8#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla"
9#define DRIVER_DATE "20120801"
10
11#define DRIVER_MAJOR 1
12#define DRIVER_MINOR 1
13#define DRIVER_PATCHLEVEL 0
14
15#include <core/client.h>
16
17#include <subdev/vm.h>
18
19#include <drmP.h>
20#include <drm/nouveau_drm.h>
21
22#include "ttm/ttm_bo_api.h"
23#include "ttm/ttm_bo_driver.h"
24#include "ttm/ttm_placement.h"
25#include "ttm/ttm_memory.h"
26#include "ttm/ttm_module.h"
27#include "ttm/ttm_page_alloc.h"
28
29struct nouveau_channel;
30
31#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
32
33#include "nouveau_fence.h"
34#include "nouveau_bios.h"
35
/* Per-region tiling bookkeeping (nv10-nv40): the fence that must signal
 * before the region may be reused, and whether it is currently taken. */
struct nouveau_drm_tile {
	struct nouveau_fence *fence;
	bool used;
};
40
/* Fixed object handles used by the drm-internal client; the PUSH/CHAN
 * bases are OR'ed with the client's channel id. */
enum nouveau_drm_handle {
	NVDRM_CLIENT = 0xffffffff,
	NVDRM_DEVICE = 0xdddddddd,
	NVDRM_PUSH   = 0xbbbb0000, /* |= client chid */
	NVDRM_CHAN   = 0xcccc0000, /* |= client chid */
};
47
/* Per-open-file driver state: a core nouveau_client plus drm glue.
 * head links the client into nouveau_drm.clients (guarded by the master
 * client's mutex); abi16 holds lazily-created legacy-ioctl state. */
struct nouveau_cli {
	struct nouveau_client base;
	struct list_head head;
	struct mutex mutex;
	void *abi16;
};
54
55static inline struct nouveau_cli *
56nouveau_cli(struct drm_file *fpriv)
57{
58 return fpriv ? fpriv->driver_priv : NULL;
59}
60
/* Top-level per-device driver state, reachable from drm_device via
 * dev_private (see nouveau_drm() below).  client is the drm-internal
 * master client; clients lists the per-file nouveau_cli instances. */
struct nouveau_drm {
	struct nouveau_cli client;
	struct drm_device *dev;

	struct nouveau_object *device;
	struct list_head clients;

	/* AGP aperture state; stat starts at UNKNOWN until probed. */
	struct {
		enum {
			UNKNOWN = 0,
			DISABLE = 1,
			ENABLED = 2
		} stat;
		u32 base;
		u32 size;
	} agp;

	/* TTM interface support */
	struct {
		struct drm_global_reference mem_global_ref;
		struct ttm_bo_global_ref bo_global_ref;
		struct ttm_bo_device bdev;
		atomic_t validate_sequence;
		/* hw-accelerated buffer copy hook, set per-generation */
		int (*move)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int mtrr;
	} ttm;

	/* GEM interface support */
	struct {
		u64 vram_available;
		u64 gart_available;
	} gem;

	/* synchronisation (opaque fence-implementation state) */
	void *fence;

	/* context for accelerated drm-internal operations */
	struct nouveau_channel *cechan;
	struct nouveau_channel *channel;
	struct nouveau_gpuobj *notify;
	struct nouveau_fbdev *fbcon;

	/* nv10-nv40 tiling regions */
	struct {
		struct nouveau_drm_tile reg[15];
		spinlock_t lock;
	} tile;

	/* modesetting */
	struct nvbios vbios;
	struct nouveau_display *display;
	struct backlight_device *backlight;

	/* power management */
	struct nouveau_pm *pm;
};
119
/* Recover the nouveau_drm state stashed in drm_device->dev_private. */
static inline struct nouveau_drm *
nouveau_drm(struct drm_device *dev)
{
	return dev->dev_private;
}
125
/* Shorthand: drm_device -> core nouveau_device object. */
static inline struct nouveau_device *
nouveau_dev(struct drm_device *dev)
{
	return nv_device(nouveau_drm(dev)->device);
}
131
132int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
133int nouveau_drm_resume(struct pci_dev *);
134
/* Logging wrappers around the core nv_* printers; the first argument is
 * any object the core printers accept (client, drm, subdev...).
 * NV_DEBUG is demoted to info-level and only emitted when DRM driver
 * debugging (drm.debug & DRM_UT_DRIVER) is enabled. */
#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
#define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args)
#define NV_DEBUG(cli, fmt, args...) do {                                       \
	if (drm_debug & DRM_UT_DRIVER)                                         \
		nv_info((cli), fmt, ##args);                                   \
} while (0)
143
144#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
deleted file mode 100644
index 9a36f5f39b06..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ /dev/null
@@ -1,513 +0,0 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/console.h>
26#include <linux/module.h>
27
28#include "drmP.h"
29#include "drm.h"
30#include "drm_crtc_helper.h"
31#include "nouveau_drv.h"
32#include "nouveau_abi16.h"
33#include "nouveau_hw.h"
34#include "nouveau_fb.h"
35#include "nouveau_fbcon.h"
36#include "nouveau_pm.h"
37#include "nouveau_fifo.h"
38#include "nv50_display.h"
39
40#include "drm_pciids.h"
41
42MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
43int nouveau_agpmode = -1;
44module_param_named(agpmode, nouveau_agpmode, int, 0400);
45
46MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
47int nouveau_modeset = -1;
48module_param_named(modeset, nouveau_modeset, int, 0400);
49
50MODULE_PARM_DESC(vbios, "Override default VBIOS location");
51char *nouveau_vbios;
52module_param_named(vbios, nouveau_vbios, charp, 0400);
53
54MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
55int nouveau_vram_pushbuf;
56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
57
58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
59int nouveau_vram_notify = 0;
60module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
61
62MODULE_PARM_DESC(vram_type, "Override detected VRAM type");
63char *nouveau_vram_type;
64module_param_named(vram_type, nouveau_vram_type, charp, 0400);
65
66MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
67int nouveau_duallink = 1;
68module_param_named(duallink, nouveau_duallink, int, 0400);
69
70MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)");
71int nouveau_uscript_lvds = -1;
72module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400);
73
74MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
75int nouveau_uscript_tmds = -1;
76module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
77
78MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
79int nouveau_ignorelid = 0;
80module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
81
82MODULE_PARM_DESC(noaccel, "Disable all acceleration");
83int nouveau_noaccel = -1;
84module_param_named(noaccel, nouveau_noaccel, int, 0400);
85
86MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
87int nouveau_nofbaccel = 0;
88module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
89
90MODULE_PARM_DESC(force_post, "Force POST");
91int nouveau_force_post = 0;
92module_param_named(force_post, nouveau_force_post, int, 0400);
93
94MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
95int nouveau_override_conntype = 0;
96module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
97
98MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
99int nouveau_tv_disable = 0;
100module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
101
102MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
103 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
104 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
105 "\t\tDefault: PAL\n"
106 "\t\t*NOTE* Ignored for cards with external TV encoders.");
107char *nouveau_tv_norm;
108module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
109
110MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
111 "\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
112 "\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
113 "\t\t0x100 vgaattr, 0x200 EVO (G80+)");
114int nouveau_reg_debug;
115module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
116
117MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
118char *nouveau_perflvl;
119module_param_named(perflvl, nouveau_perflvl, charp, 0400);
120
121MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
122int nouveau_perflvl_wr;
123module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
124
125MODULE_PARM_DESC(msi, "Enable MSI (default: off)");
126int nouveau_msi;
127module_param_named(msi, nouveau_msi, int, 0400);
128
129MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)");
130int nouveau_ctxfw;
131module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
132
133MODULE_PARM_DESC(mxmdcb, "Santise DCB table according to MXM-SIS");
134int nouveau_mxmdcb = 1;
135module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
136
137int nouveau_fbpercrtc;
138#if 0
139module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
140#endif
141
142static struct pci_device_id pciidlist[] = {
143 {
144 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
145 .class = PCI_BASE_CLASS_DISPLAY << 16,
146 .class_mask = 0xff << 16,
147 },
148 {
149 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
150 .class = PCI_BASE_CLASS_DISPLAY << 16,
151 .class_mask = 0xff << 16,
152 },
153 {}
154};
155
156MODULE_DEVICE_TABLE(pci, pciidlist);
157
158static struct drm_driver driver;
159
/* (legacy driver, removed by this commit) PCI probe: defer entirely to
 * the drm core with the static driver description below. */
static int __devinit
nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}
165
/* (legacy driver, removed by this commit) PCI remove: drop the drm
 * device; all teardown happens in the driver's .unload hook. */
static void
nouveau_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
173
/* (legacy driver, removed by this commit) Suspend: quiesce display,
 * unpin scanout/cursor buffers, evict VRAM, idle channels, fini engines
 * in reverse order, and snapshot instmem/GPU objects.  On engine-fini
 * failure, re-inits the engines already fini'd and aborts. */
int
nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_channel *chan;
	struct drm_crtc *crtc;
	int ret, i, e;

	if (pm_state.event == PM_EVENT_PRETHAW)
		return 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	NV_INFO(dev, "Disabling display...\n");
	nouveau_display_fini(dev);

	NV_INFO(dev, "Disabling fbcon...\n");
	nouveau_fbcon_set_suspend(dev, 1);

	/* Unpin every framebuffer and cursor bo so eviction can move them. */
	NV_INFO(dev, "Unpinning framebuffer(s)...\n");
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct nouveau_framebuffer *nouveau_fb;

		nouveau_fb = nouveau_framebuffer(crtc->fb);
		if (!nouveau_fb || !nouveau_fb->nvbo)
			continue;

		nouveau_bo_unpin(nouveau_fb->nvbo);
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);

		nouveau_bo_unmap(nv_crtc->cursor.nvbo);
		nouveau_bo_unpin(nv_crtc->cursor.nvbo);
	}

	NV_INFO(dev, "Evicting buffers...\n");
	ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);

	NV_INFO(dev, "Idling channels...\n");
	for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
		chan = dev_priv->channels.ptr[i];

		if (chan && chan->pushbuf_bo)
			nouveau_channel_idle(chan);
	}

	/* Engines come down in reverse of init order. */
	for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
		if (!dev_priv->eng[e])
			continue;

		ret = dev_priv->eng[e]->fini(dev, e, true);
		if (ret) {
			NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
			goto out_abort;
		}
	}

	ret = pinstmem->suspend(dev);
	if (ret) {
		NV_ERROR(dev, "... failed: %d\n", ret);
		goto out_abort;
	}

	NV_INFO(dev, "Suspending GPU objects...\n");
	ret = nouveau_gpuobj_suspend(dev);
	if (ret) {
		NV_ERROR(dev, "... failed: %d\n", ret);
		pinstmem->resume(dev);
		goto out_abort;
	}

	NV_INFO(dev, "And we're gone!\n");
	pci_save_state(pdev);
	if (pm_state.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;

out_abort:
	/* Restart only the engines that were already fini'd (e+1..NR-1). */
	NV_INFO(dev, "Re-enabling acceleration..\n");
	for (e = e + 1; e < NVOBJ_ENGINE_NR; e++) {
		if (dev_priv->eng[e])
			dev_priv->eng[e]->init(dev, e);
	}
	return ret;
}
268
269int
270nouveau_pci_resume(struct pci_dev *pdev)
271{
272 struct drm_device *dev = pci_get_drvdata(pdev);
273 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
274 struct drm_nouveau_private *dev_priv = dev->dev_private;
275 struct nouveau_engine *engine = &dev_priv->engine;
276 struct drm_crtc *crtc;
277 int ret, i;
278
279 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
280 return 0;
281
282 NV_INFO(dev, "We're back, enabling device...\n");
283 pci_set_power_state(pdev, PCI_D0);
284 pci_restore_state(pdev);
285 if (pci_enable_device(pdev))
286 return -1;
287 pci_set_master(dev->pdev);
288
289 /* Make sure the AGP controller is in a consistent state */
290 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
291 nouveau_mem_reset_agp(dev);
292
293 /* Make the CRTCs accessible */
294 engine->display.early_init(dev);
295
296 NV_INFO(dev, "POSTing device...\n");
297 ret = nouveau_run_vbios_init(dev);
298 if (ret)
299 return ret;
300
301 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
302 ret = nouveau_mem_init_agp(dev);
303 if (ret) {
304 NV_ERROR(dev, "error reinitialising AGP: %d\n", ret);
305 return ret;
306 }
307 }
308
309 NV_INFO(dev, "Restoring GPU objects...\n");
310 nouveau_gpuobj_resume(dev);
311
312 NV_INFO(dev, "Reinitialising engines...\n");
313 engine->instmem.resume(dev);
314 engine->mc.init(dev);
315 engine->timer.init(dev);
316 engine->fb.init(dev);
317 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
318 if (dev_priv->eng[i])
319 dev_priv->eng[i]->init(dev, i);
320 }
321
322 nouveau_irq_postinstall(dev);
323
324 /* Re-write SKIPS, they'll have been lost over the suspend */
325 if (nouveau_vram_pushbuf) {
326 struct nouveau_channel *chan;
327 int j;
328
329 for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
330 chan = dev_priv->channels.ptr[i];
331 if (!chan || !chan->pushbuf_bo)
332 continue;
333
334 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
335 nouveau_bo_wr32(chan->pushbuf_bo, i, 0);
336 }
337 }
338
339 nouveau_pm_resume(dev);
340
341 NV_INFO(dev, "Restoring mode...\n");
342 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
343 struct nouveau_framebuffer *nouveau_fb;
344
345 nouveau_fb = nouveau_framebuffer(crtc->fb);
346 if (!nouveau_fb || !nouveau_fb->nvbo)
347 continue;
348
349 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
350 }
351
352 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
353 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
354
355 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
356 if (!ret)
357 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
358 if (ret)
359 NV_ERROR(dev, "Could not pin/map cursor.\n");
360 }
361
362 nouveau_fbcon_set_suspend(dev, 0);
363 nouveau_fbcon_zfill_all(dev);
364
365 nouveau_display_init(dev);
366
367 /* Force CLUT to get re-loaded during modeset */
368 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
369 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
370
371 nv_crtc->lut.depth = 0;
372 }
373
374 drm_helper_resume_force_mode(dev);
375
376 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
377 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
378 u32 offset = nv_crtc->cursor.nvbo->bo.offset;
379
380 nv_crtc->cursor.set_offset(nv_crtc, offset);
381 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
382 nv_crtc->cursor_saved_y);
383 }
384
385 return 0;
386}
387
/* (legacy driver, removed by this commit) Nouveau-private ioctl table;
 * identical set to the one carried over into the new nouveau_drm.c. */
static struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};
402
/* (legacy driver, removed by this commit) Character-device fops. */
static const struct file_operations nouveau_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = nouveau_ttm_mmap,
	.poll = drm_poll,
	.fasync = drm_fasync,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = nouveau_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
417
/* (legacy driver, removed by this commit) drm_driver description; the
 * replacement in nouveau_drm.c drops firstopen and the debugfs hooks. */
static struct drm_driver driver = {
	.driver_features =
		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
		DRIVER_MODESET | DRIVER_PRIME,
	.load = nouveau_load,
	.firstopen = nouveau_firstopen,
	.lastclose = nouveau_lastclose,
	.unload = nouveau_unload,
	.open = nouveau_open,
	.preclose = nouveau_preclose,
	.postclose = nouveau_postclose,
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
	.debugfs_init = nouveau_debugfs_init,
	.debugfs_cleanup = nouveau_debugfs_takedown,
#endif
	.irq_preinstall = nouveau_irq_preinstall,
	.irq_postinstall = nouveau_irq_postinstall,
	.irq_uninstall = nouveau_irq_uninstall,
	.irq_handler = nouveau_irq_handler,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = nouveau_vblank_enable,
	.disable_vblank = nouveau_vblank_disable,
	.ioctls = nouveau_ioctls,
	.fops = &nouveau_driver_fops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = nouveau_gem_prime_export,
	.gem_prime_import = nouveau_gem_prime_import,

	.gem_init_object = nouveau_gem_object_new,
	.gem_free_object = nouveau_gem_object_del,
	.gem_open_object = nouveau_gem_object_open,
	.gem_close_object = nouveau_gem_object_close,

	.dumb_create = nouveau_display_dumb_create,
	.dumb_map_offset = nouveau_display_dumb_map_offset,
	.dumb_destroy = nouveau_display_dumb_destroy,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
#ifdef GIT_REVISION
	.date = GIT_REVISION,
#else
	.date = DRIVER_DATE,
#endif
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
469
/* (legacy driver, removed by this commit) PCI driver glue. */
static struct pci_driver nouveau_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = nouveau_pci_probe,
	.remove = nouveau_pci_remove,
	.suspend = nouveau_pci_suspend,
	.resume = nouveau_pci_resume
};
478
/* (legacy driver, removed by this commit) Module init: resolve the
 * modeset= default, then register with the DRM/PCI core. */
static int __init nouveau_init(void)
{
	driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);

	if (nouveau_modeset == -1) {
#ifdef CONFIG_VGA_CONSOLE
		if (vgacon_text_force())
			nouveau_modeset = 0;
		else
#endif
			nouveau_modeset = 1;
	}

	if (!nouveau_modeset)
		return 0;

	nouveau_register_dsm_handler();
	return drm_pci_init(&driver, &nouveau_pci_driver);
}
498
/* (legacy driver, removed by this commit) Module exit; nothing to undo
 * when modesetting was disabled at init. */
static void __exit nouveau_exit(void)
{
	if (!nouveau_modeset)
		return;

	drm_pci_exit(&driver, &nouveau_pci_driver);
	nouveau_unregister_dsm_handler();
}
507
508module_init(nouveau_init);
509module_exit(nouveau_exit);
510
511MODULE_AUTHOR(DRIVER_AUTHOR);
512MODULE_DESCRIPTION(DRIVER_DESC);
513MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
deleted file mode 100644
index 4f2cc95ce264..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ /dev/null
@@ -1,1655 +0,0 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef __NOUVEAU_DRV_H__
26#define __NOUVEAU_DRV_H__
27
/* Driver identification, reported to userspace via the DRM version ioctl. */
#define DRIVER_AUTHOR "Stephane Marchesin"
#define DRIVER_EMAIL "nouveau@lists.freedesktop.org"

#define DRIVER_NAME "nouveau"
#define DRIVER_DESC "nVidia Riva/TNT/GeForce"
#define DRIVER_DATE "20120316"

#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

/* Masks splitting a combined family/flags word — presumably applied to
 * drm_nouveau_private::flags; confirm at use sites. */
#define NOUVEAU_FAMILY 0x0000FFFF
#define NOUVEAU_FLAGS 0xFFFF0000
41
42#include "ttm/ttm_bo_api.h"
43#include "ttm/ttm_bo_driver.h"
44#include "ttm/ttm_placement.h"
45#include "ttm/ttm_memory.h"
46#include "ttm/ttm_module.h"
47
/* Per-open-file driver state, stored in drm_file::driver_priv
 * (see nouveau_fpriv() below). */
struct nouveau_fpriv {
	spinlock_t lock;		/* presumably guards @channels — confirm at call sites */
	struct list_head channels;	/* channels owned by this client */
	struct nouveau_vm *vm;		/* client address space; NOTE(review): likely NULL on pre-VM chipsets — verify */
};
53
54static inline struct nouveau_fpriv *
55nouveau_fpriv(struct drm_file *file_priv)
56{
57 return file_priv ? file_priv->driver_priv : NULL;
58}
59
/* mmap() offset at which GEM objects are exposed (4GiB, in pages). */
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

#include "nouveau_drm.h"
#include "nouveau_reg.h"
#include "nouveau_bios.h"
#include "nouveau_util.h"

struct nouveau_grctx;
struct nouveau_mem;
#include "nouveau_vm.h"

#define MAX_NUM_DCB_ENTRIES 16

/* Upper bounds for the channel table and nv10-nv40 tiling regions. */
#define NOUVEAU_MAX_CHANNEL_NR 4096
#define NOUVEAU_MAX_TILE_NR 15
75
/* Backing storage for a buffer object: VRAM regions and/or a DMA page
 * list for system memory, plus the VM mappings made of it. */
struct nouveau_mem {
	struct drm_device *dev;

	struct nouveau_vma bar_vma;	/* mapping through the BAR aperture */
	struct nouveau_vma vma[2];
	u8 page_shift;			/* log2 of the page size used for mappings */

	struct drm_mm_node *tag;	/* NOTE(review): likely compression-tag node — confirm */
	struct list_head regions;	/* VRAM mm nodes backing this allocation */
	dma_addr_t *pages;		/* DMA addresses when backed by sysmem */
	u32 memtype;
	u64 offset;
	u64 size;
	struct sg_table *sg;		/* scatterlist for dma-buf imports */
};
91
/* Software state of one nv10-nv40 hardware tiling region
 * (mirrored in dev_priv->tile.reg[]). */
struct nouveau_tile_reg {
	bool used;
	uint32_t addr;
	uint32_t limit;
	uint32_t pitch;
	uint32_t zcomp;
	struct drm_mm_node *tag_mem;
	struct nouveau_fence *fence;	/* fence the region stays busy until */
};
101
/* Nouveau buffer object: wraps a TTM buffer object with GEM, tiling
 * and VM-mapping state.  Convert from the embedded objects with
 * nouveau_bo() / nouveau_gem_object(). */
struct nouveau_bo {
	struct ttm_buffer_object bo;
	struct ttm_placement placement;
	u32 valid_domains;
	u32 placements[3];
	u32 busy_placements[3];
	struct ttm_bo_kmap_obj kmap;	/* cached kernel mapping, see nvbo_kmap_obj_iovirtual() */
	struct list_head head;

	/* protected by ttm_bo_reserve() */
	struct drm_file *reserved_by;
	struct list_head entry;
	int pbbo_index;
	bool validate_mapped;

	struct list_head vma_list;	/* nouveau_vma mappings of this bo */
	unsigned page_shift;

	uint32_t tile_mode;
	uint32_t tile_flags;
	struct nouveau_tile_reg *tile;

	struct drm_gem_object *gem;	/* GEM wrapper, if exposed to userspace */
	int pin_refcnt;

	/* dma-buf export: cached vmap and its reference count */
	struct ttm_bo_kmap_obj dma_buf_vmap;
	int vmapping_count;
};
130
/* Extract the tiling-layout bits from a bo's tile_flags. */
#define nouveau_bo_tile_layout(nvbo)				\
	((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)

/* Map an embedded TTM buffer object back to its nouveau_bo. */
static inline struct nouveau_bo *
nouveau_bo(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct nouveau_bo, bo);
}
139
140static inline struct nouveau_bo *
141nouveau_gem_object(struct drm_gem_object *gem)
142{
143 return gem ? gem->driver_private : NULL;
144}
145
/* TODO: submit equivalent to TTM generic API upstream? */
/* Return the kernel virtual address of a bo's cached kmap as an
 * __iomem pointer.  The force-cast is deliberate: ttm_kmap_obj_virtual()
 * returns a plain pointer, and the WARN catches the unexpected case of
 * a mapping that is not I/O memory. */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
{
	bool is_iomem;
	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
						&nvbo->kmap, &is_iomem);
	WARN_ON_ONCE(ioptr && !is_iomem);
	return ioptr;
}
156
/* Chipset quirk flags — presumably OR'd into the high bits of
 * dev_priv->flags (see NOUVEAU_FLAGS mask); confirm at use sites. */
enum nouveau_flags {
	NV_NFORCE = 0x10000000,
	NV_NFORCE2 = 0x20000000
};

/* Indices into dev_priv->eng[] for each execution engine. */
#define NVOBJ_ENGINE_SW 0
#define NVOBJ_ENGINE_GR 1
#define NVOBJ_ENGINE_CRYPT 2
#define NVOBJ_ENGINE_COPY0 3
#define NVOBJ_ENGINE_COPY1 4
#define NVOBJ_ENGINE_MPEG 5
#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
#define NVOBJ_ENGINE_BSP 6
#define NVOBJ_ENGINE_VP 7
#define NVOBJ_ENGINE_FIFO 14
#define NVOBJ_ENGINE_FENCE 15
#define NVOBJ_ENGINE_NR 16
#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/

/* Allocation/behaviour flags for nouveau_gpuobj::flags. */
#define NVOBJ_FLAG_DONT_MAP (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
#define NVOBJ_FLAG_VM (1 << 3)
#define NVOBJ_FLAG_VM_USER (1 << 4)

/* Sentinel cinst for gpuobjs that are not channel-local. */
#define NVOBJ_CINST_GLOBAL 0xdeadbeef
183
/* A GPU object: a reference-counted block of instance memory or VRAM
 * that the hardware interprets (contexts, DMA objects, class objects).
 * The same object is addressable several ways, hence the *inst fields. */
struct nouveau_gpuobj {
	struct drm_device *dev;
	struct kref refcount;
	struct list_head list;

	void *node;		/* allocator-private handle */
	u32 *suspend;		/* shadow copy of contents across suspend (see gpuobj_suspend) */

	uint32_t flags;		/* NVOBJ_FLAG_* */

	u32 size;
	u32 pinst;	/* PRAMIN BAR offset */
	u32 cinst;	/* Channel offset */
	u64 vinst;	/* VRAM address */
	u64 linst;	/* VM address */

	uint32_t engine;
	uint32_t class;

	void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
	void *priv;
};
206
/* Bookkeeping for one in-flight pageflip: where to scan out from once
 * the flip completes, and the vblank event to deliver to userspace. */
struct nouveau_page_flip_state {
	struct list_head head;
	struct drm_pending_vblank_event *event;
	int crtc, bpp, pitch, x, y;
	uint64_t offset;
};

/* Lock-ordering classes for channel mutexes (user- vs kernel-owned). */
enum nouveau_channel_mutex_class {
	NOUVEAU_UCHANNEL_MUTEX,
	NOUVEAU_KCHANNEL_MUTEX
};
218
/* A FIFO channel: the per-client command submission context, with its
 * push buffer, notifier memory, per-engine contexts and (nv50+) VM.
 * Lifetime is split between @ref (structure) and @users (hw context). */
struct nouveau_channel {
	struct drm_device *dev;
	struct list_head list;
	int id;			/* hw channel id; index into dev_priv->channels.ptr */

	/* references to the channel data structure */
	struct kref ref;
	/* users of the hardware channel resources, the hardware
	 * context will be kicked off when it reaches zero. */
	atomic_t users;
	struct mutex mutex;

	/* owner of this fifo */
	struct drm_file *file_priv;
	/* mapping of the fifo itself */
	struct drm_local_map *map;

	/* mapping of the regs controlling the fifo */
	void __iomem *user;
	uint32_t user_get;
	uint32_t user_get_hi;
	uint32_t user_put;

	/* DMA push buffer */
	struct nouveau_gpuobj *pushbuf;
	struct nouveau_bo *pushbuf_bo;
	struct nouveau_vma pushbuf_vma;
	uint64_t pushbuf_base;

	/* Notifier memory */
	struct nouveau_bo *notifier_bo;
	struct nouveau_vma notifier_vma;
	struct drm_mm notifier_heap;

	/* PFIFO context */
	struct nouveau_gpuobj *ramfc;

	/* Execution engine contexts */
	void *engctx[NVOBJ_ENGINE_NR];

	/* NV50 VM */
	struct nouveau_vm *vm;
	struct nouveau_gpuobj *vm_pd;

	/* Objects */
	struct nouveau_gpuobj *ramin; /* Private instmem */
	struct drm_mm ramin_heap; /* Private PRAMIN heap */
	struct nouveau_ramht *ramht; /* Hash table */

	/* GPU object info for stuff used in-kernel (mm_enabled) */
	uint32_t m2mf_ntfy;
	uint32_t vram_handle;
	uint32_t gart_handle;
	bool accel_done;

	/* Push buffer state (only for drm's channel on !mm_enabled) */
	struct {
		int max;
		int free;
		int cur;
		int put;
		/* access via pushbuf_bo */

		int ib_base;
		int ib_max;
		int ib_free;
		int ib_put;
	} dma;

	/* Per-channel debugfs entry (see nouveau_debugfs_channel_init). */
	struct {
		bool active;
		char name[32];
		struct drm_info_list info;
	} debugfs;
};
294
/* Per-engine hooks for execution engines (gr, crypt, copy, mpeg, ...);
 * instances live in dev_priv->eng[], indexed by NVOBJ_ENGINE_*. */
struct nouveau_exec_engine {
	void (*destroy)(struct drm_device *, int engine);
	int (*init)(struct drm_device *, int engine);
	int (*fini)(struct drm_device *, int engine, bool suspend);
	int (*context_new)(struct nouveau_channel *, int engine);
	void (*context_del)(struct nouveau_channel *, int engine);
	int (*object_new)(struct nouveau_channel *, int engine,
			  u32 handle, u16 class);
	void (*set_tile_region)(struct drm_device *dev, int i);
	void (*tlb_flush)(struct drm_device *, int engine);
};

/* Instance-memory (PRAMIN) allocation and mapping hooks. */
struct nouveau_instmem_engine {
	void *priv;

	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);
	int (*suspend)(struct drm_device *dev);
	void (*resume)(struct drm_device *dev);

	int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
		   u32 size, u32 align);
	void (*put)(struct nouveau_gpuobj *);
	int (*map)(struct nouveau_gpuobj *);
	void (*unmap)(struct nouveau_gpuobj *);

	void (*flush)(struct drm_device *);
};

/* Master-control (PMC) hooks. */
struct nouveau_mc_engine {
	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);
};

/* Timer (PTIMER) hooks; read() returns the free-running counter. */
struct nouveau_timer_engine {
	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);
	uint64_t (*read)(struct drm_device *dev);
};
334
/* Framebuffer/memory-controller (PFB) hooks, including the nv10-nv40
 * tiling-region management. */
struct nouveau_fb_engine {
	int num_tiles;
	struct drm_mm tag_heap;
	void *priv;

	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);

	void (*init_tile_region)(struct drm_device *dev, int i,
				 uint32_t addr, uint32_t size,
				 uint32_t pitch, uint32_t flags);
	void (*set_tile_region)(struct drm_device *dev, int i);
	void (*free_tile_region)(struct drm_device *dev, int i);
};

/* Display subsystem hooks plus the KMS properties nouveau registers. */
struct nouveau_display_engine {
	void *priv;
	int (*early_init)(struct drm_device *);
	void (*late_takedown)(struct drm_device *);
	int (*create)(struct drm_device *);
	void (*destroy)(struct drm_device *);
	int (*init)(struct drm_device *);
	void (*fini)(struct drm_device *);

	struct drm_property *dithering_mode;
	struct drm_property *dithering_depth;
	struct drm_property *underscan_property;
	struct drm_property *underscan_hborder_property;
	struct drm_property *underscan_vborder_property;
	/* not really hue and saturation: */
	struct drm_property *vibrant_hue_property;
	struct drm_property *color_vibrance_property;
};

/* GPIO line access and interrupt dispatch; @isr holds registered
 * handlers, guarded by @lock. */
struct nouveau_gpio_engine {
	spinlock_t lock;
	struct list_head isr;
	int (*init)(struct drm_device *);
	void (*fini)(struct drm_device *);
	int (*drive)(struct drm_device *, int line, int dir, int out);
	int (*sense)(struct drm_device *, int line);
	void (*irq_enable)(struct drm_device *, int line, bool);
};
378
/* One VID <-> voltage mapping from the VBIOS voltage table. */
struct nouveau_pm_voltage_level {
	u32 voltage; /* microvolts */
	u8 vid;
};

struct nouveau_pm_voltage {
	bool supported;
	u8 version;
	u8 vid_mask;

	struct nouveau_pm_voltage_level *level;	/* table of @nr_level entries */
	int nr_level;
};

/* Exclusive upper limits */
/* Sanity bounds for per-memory-type CAS/write latencies. */
#define NV_MEM_CL_DDR2_MAX 8
#define NV_MEM_WR_DDR2_MAX 9
#define NV_MEM_CL_DDR3_MAX 17
#define NV_MEM_WR_DDR3_MAX 17
#define NV_MEM_CL_GDDR3_MAX 16
#define NV_MEM_WR_GDDR3_MAX 18
#define NV_MEM_CL_GDDR5_MAX 21
#define NV_MEM_WR_GDDR5_MAX 20

/* Computed memory-timing register and mode-register values for one
 * performance level (filled by nouveau_mem_timing_calc()). */
struct nouveau_pm_memtiming {
	int id;

	u32 reg[9];
	u32 mr[4];

	u8 tCWL;

	u8 odt;
	u8 drive_strength;
};
414
/* Generic VBIOS table header layout (version/sizes/entry count). */
struct nouveau_pm_tbl_header {
	u8 version;
	u8 header_len;
	u8 entry_cnt;
	u8 entry_len;
};

/* Raw VBIOS memory-timing table entry; byte offsets noted where known,
 * tUNK_*/empty_* fields are not yet understood. */
struct nouveau_pm_tbl_entry {
	u8 tWR;
	u8 tWTR;
	u8 tCL;
	u8 tRC;
	u8 empty_4;
	u8 tRFC; /* Byte 5 */
	u8 empty_6;
	u8 tRAS; /* Byte 7 */
	u8 empty_8;
	u8 tRP; /* Byte 9 */
	u8 tRCDRD;
	u8 tRCDWR;
	u8 tRRD;
	u8 tUNK_13;
	u8 RAM_FT1; /* 14, a bitmask of random RAM features */
	u8 empty_15;
	u8 tUNK_16;
	u8 empty_17;
	u8 tUNK_18;
	u8 tCWL;
	u8 tUNK_20, tUNK_21;
};

/* A power-management profile: a named policy that selects which
 * performance level to run (see nouveau_pm_engine::profiles). */
struct nouveau_pm_profile;
struct nouveau_pm_profile_func {
	void (*destroy)(struct nouveau_pm_profile *);
	void (*init)(struct nouveau_pm_profile *);
	void (*fini)(struct nouveau_pm_profile *);
	struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
};

struct nouveau_pm_profile {
	const struct nouveau_pm_profile_func *func;
	struct list_head head;
	char name[8];
};
459
#define NOUVEAU_PM_MAX_LEVEL 8
/* One performance level: target clocks, voltage range and memory
 * timings.  Clock fields are chipset-generation specific, see notes. */
struct nouveau_pm_level {
	struct nouveau_pm_profile profile;
	struct device_attribute dev_attr;	/* sysfs attribute for this level */
	char name[32];
	int id;

	struct nouveau_pm_memtiming timing;
	u32 memory;
	u16 memscript;

	u32 core;
	u32 shader;
	u32 rop;
	u32 copy;
	u32 daemon;
	u32 vdec;
	u32 dom6;
	u32 unka0;	/* nva3:nvc0 */
	u32 hub01;	/* nvc0- */
	u32 hub06;	/* nvc0- */
	u32 hub07;	/* nvc0- */

	u32 volt_min; /* microvolts */
	u32 volt_max;
	u8 fanspeed;
};

/* Calibration constants for converting raw sensor readings into
 * degrees (from the VBIOS thermal table). */
struct nouveau_pm_temp_sensor_constants {
	u16 offset_constant;
	s16 offset_mult;
	s16 offset_div;
	s16 slope_mult;
	s16 slope_div;
};

/* Temperature thresholds (degrees) at which protective action is taken. */
struct nouveau_pm_threshold_temp {
	s16 critical;
	s16 down_clock;
	s16 fan_boost;
};

/* Fan-control parameters; duty cycle limits and PWM configuration. */
struct nouveau_pm_fan {
	u32 percent;
	u32 min_duty;
	u32 max_duty;
	u32 pwm_freq;
	u32 pwm_divisor;
};
509
/* Power-management subsystem state: parsed performance levels,
 * voltage/thermal data, active profile, and the chipset-specific
 * clock/voltage/fan accessors. */
struct nouveau_pm_engine {
	struct nouveau_pm_voltage voltage;
	struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
	int nr_perflvl;
	struct nouveau_pm_temp_sensor_constants sensor_constants;
	struct nouveau_pm_threshold_temp threshold_temp;
	struct nouveau_pm_fan fan;

	/* AC/DC profiles plus the currently selected one. */
	struct nouveau_pm_profile *profile_ac;
	struct nouveau_pm_profile *profile_dc;
	struct nouveau_pm_profile *profile;
	struct list_head profiles;

	struct nouveau_pm_level boot;	/* level the card booted at */
	struct nouveau_pm_level *cur;	/* level currently applied */

	struct device *hwmon;
	struct notifier_block acpi_nb;	/* AC adapter event notifier */

	/* three-phase reclock: read, prepare, commit */
	int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
	void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
	int (*clocks_set)(struct drm_device *, void *);

	int (*voltage_get)(struct drm_device *);
	int (*voltage_set)(struct drm_device *, int voltage);
	int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
	int (*pwm_set)(struct drm_device *, int line, u32, u32);
	int (*temp_get)(struct drm_device *);
};
539
/* VRAM allocator hooks; get/put operate on nouveau_mem allocations. */
struct nouveau_vram_engine {
	struct nouveau_mm mm;

	int (*init)(struct drm_device *);
	void (*takedown)(struct drm_device *dev);
	int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
		   u32 type, struct nouveau_mem **);
	void (*put)(struct drm_device *, struct nouveau_mem **);

	bool (*flags_valid)(struct drm_device *, u32 tile_flags);
};

/* Aggregate of every fixed-function subsystem (dev_priv->engine). */
struct nouveau_engine {
	struct nouveau_instmem_engine instmem;
	struct nouveau_mc_engine mc;
	struct nouveau_timer_engine timer;
	struct nouveau_fb_engine fb;
	struct nouveau_display_engine display;
	struct nouveau_gpio_engine gpio;
	struct nouveau_pm_engine pm;
	struct nouveau_vram_engine vram;
};
562
/* Decoded PLL coefficients (two N/M stages plus post-divider shift).
 * The union lets the pair be read byte-wise or as packed 16-bit words;
 * byte order of N/M is flipped on big-endian to keep NM1/NM2 aliasing
 * consistent. */
struct nouveau_pll_vals {
	union {
		struct {
#ifdef __BIG_ENDIAN
			uint8_t N1, M1, N2, M2;
#else
			uint8_t M1, N1, M2, N2;
#endif
		};
		struct {
			uint16_t NM1, NM2;
		} __attribute__((packed));
	};
	int log2P;	/* log2 of the post divider */

	int refclk;	/* reference clock, presumably kHz — confirm at call sites */
};
580
/* Indices into nv04_crtc_reg::fp_horiz_regs/fp_vert_regs. */
enum nv04_fp_display_regs {
	FP_DISPLAY_END,
	FP_TOTAL,
	FP_CRTC,
	FP_SYNC_START,
	FP_SYNC_END,
	FP_VALID_START,
	FP_VALID_END
};

/* Full register shadow for one nv04-era CRTC: VGA core registers
 * plus the PCRTC and PRAMDAC extensions. */
struct nv04_crtc_reg {
	unsigned char MiscOutReg;
	uint8_t CRTC[0xa0];
	uint8_t CR58[0x10];
	uint8_t Sequencer[5];
	uint8_t Graphics[9];
	uint8_t Attribute[21];
	unsigned char DAC[768];		/* palette: 256 * RGB */

	/* PCRTC regs */
	uint32_t fb_start;
	uint32_t crtc_cfg;
	uint32_t cursor_cfg;
	uint32_t gpio_ext;
	uint32_t crtc_830;
	uint32_t crtc_834;
	uint32_t crtc_850;
	uint32_t crtc_eng_ctrl;

	/* PRAMDAC regs */
	uint32_t nv10_cursync;
	struct nouveau_pll_vals pllvals;
	uint32_t ramdac_gen_ctrl;
	uint32_t ramdac_630;
	uint32_t ramdac_634;
	uint32_t tv_setup;
	uint32_t tv_vtotal;
	uint32_t tv_vskew;
	uint32_t tv_vsync_delay;
	uint32_t tv_htotal;
	uint32_t tv_hskew;
	uint32_t tv_hsync_delay;
	uint32_t tv_hsync_delay2;
	uint32_t fp_horiz_regs[7];	/* indexed by enum nv04_fp_display_regs */
	uint32_t fp_vert_regs[7];	/* indexed by enum nv04_fp_display_regs */
	uint32_t dither;
	uint32_t fp_control;
	uint32_t dither_regs[6];
	uint32_t fp_debug_0;
	uint32_t fp_debug_1;
	uint32_t fp_debug_2;
	uint32_t fp_margin_color;
	uint32_t ramdac_8c0;
	uint32_t ramdac_a20;
	uint32_t ramdac_a24;
	uint32_t ramdac_a34;
	uint32_t ctv_regs[38];
};

/* Which output register/head an nv04 encoder is routed to. */
struct nv04_output_reg {
	uint32_t output;
	int head;
};

/* Complete nv04 display state: both CRTCs plus clock routing. */
struct nv04_mode_state {
	struct nv04_crtc_reg crtc_reg[2];
	uint32_t pllsel;
	uint32_t sel_clk;
};
650
/* GPU generation, derived from the chipset id; values match the first
 * chipset of each family so they order naturally for >= comparisons. */
enum nouveau_card_type {
	NV_04 = 0x04,
	NV_10 = 0x10,
	NV_20 = 0x20,
	NV_30 = 0x30,
	NV_40 = 0x40,
	NV_50 = 0x50,
	NV_C0 = 0xc0,
	NV_D0 = 0xd0,
	NV_E0 = 0xe0,
};
662
/* Top-level per-device state, hung off drm_device::dev_private
 * (fetch with nouveau_private()).  Groups MMIO/PRAMIN mappings, the
 * subsystem engines, channel table, memory/GART configuration and the
 * legacy nv04 display shadow state. */
struct drm_nouveau_private {
	struct drm_device *dev;
	bool noaccel;

	/* the card type, takes NV_* as values */
	enum nouveau_card_type card_type;
	/* exact chipset, derived from NV_PMC_BOOT_0 */
	int chipset;
	int flags;
	u32 crystal;	/* crystal frequency — units not shown here, confirm at use sites */

	void __iomem *mmio;	/* register BAR mapping */

	/* PRAMIN aperture; @ramin_lock guards window accesses */
	spinlock_t ramin_lock;
	void __iomem *ramin;
	u32 ramin_size;
	u32 ramin_base;
	bool ramin_available;
	struct drm_mm ramin_heap;
	struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR];
	struct list_head gpuobj_list;
	struct list_head classes;

	struct nouveau_bo *vga_ram;

	/* interrupt handling */
	void (*irq_handler[32])(struct drm_device *);	/* indexed by PMC_INTR status bit */
	bool msi_enabled;

	/* TTM integration and the engine-specific bo-move hook */
	struct {
		struct drm_global_reference mem_global_ref;
		struct ttm_bo_global_ref bo_global_ref;
		struct ttm_bo_device bdev;
		atomic_t validate_sequence;
		int (*move)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
	} ttm;

	/* fence bookkeeping */
	struct {
		spinlock_t lock;
		struct drm_mm heap;
		struct nouveau_bo *bo;
	} fence;

	/* channel table, indexed by channel id */
	struct {
		spinlock_t lock;
		struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
	} channels;

	struct nouveau_engine engine;
	struct nouveau_channel *channel;	/* kernel's own channel */

	/* For PFIFO and PGRAPH. */
	spinlock_t context_switch_lock;

	/* VM/PRAMIN flush, legacy PRAMIN aperture */
	spinlock_t vm_lock;

	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
	struct nouveau_ramht *ramht;
	struct nouveau_gpuobj *ramfc;
	struct nouveau_gpuobj *ramro;

	uint32_t ramin_rsvd_vram;

	/* GART configuration: which mechanism is in use and its aperture */
	struct {
		enum {
			NOUVEAU_GART_NONE = 0,
			NOUVEAU_GART_AGP,	/* AGP */
			NOUVEAU_GART_PDMA,	/* paged dma object */
			NOUVEAU_GART_HW		/* on-chip gart/vm */
		} type;
		uint64_t aper_base;
		uint64_t aper_size;
		uint64_t aper_free;

		struct ttm_backend_func *func;

		/* placeholder page for unbound GART entries */
		struct {
			struct page *page;
			dma_addr_t addr;
		} dummy;

		struct nouveau_gpuobj *sg_ctxdma;
	} gart_info;

	/* nv10-nv40 tiling regions */
	struct {
		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
		spinlock_t lock;
	} tile;

	/* VRAM/fb configuration */
	enum {
		NV_MEM_TYPE_UNKNOWN = 0,
		NV_MEM_TYPE_STOLEN,
		NV_MEM_TYPE_SGRAM,
		NV_MEM_TYPE_SDRAM,
		NV_MEM_TYPE_DDR1,
		NV_MEM_TYPE_DDR2,
		NV_MEM_TYPE_DDR3,
		NV_MEM_TYPE_GDDR2,
		NV_MEM_TYPE_GDDR3,
		NV_MEM_TYPE_GDDR4,
		NV_MEM_TYPE_GDDR5
	} vram_type;
	uint64_t vram_size;
	uint64_t vram_sys_base;
	bool vram_rank_B;

	uint64_t fb_available_size;
	uint64_t fb_mappable_pages;
	uint64_t fb_aper_free;
	int fb_mtrr;

	/* BAR control (NV50-) */
	struct nouveau_vm *bar1_vm;
	struct nouveau_vm *bar3_vm;

	/* G8x/G9x virtual address space */
	struct nouveau_vm *chan_vm;

	struct nvbios vbios;
	u8 *mxms;		/* MXM SIS data, see nouveau_mxm.c */
	struct list_head i2c_ports;

	/* nv04 display state shadows: current vs pre-KMS saved registers */
	struct nv04_mode_state mode_reg;
	struct nv04_mode_state saved_reg;
	uint32_t saved_vga_font[4][16384];
	uint32_t crtc_owner;
	uint32_t dac_users[4];

	struct backlight_device *backlight;

	struct {
		struct dentry *channel_root;
	} debugfs;

	struct nouveau_fbdev *nfbdev;
	struct apertures_struct *apertures;
};
805
/* Fetch the nouveau device state from a drm_device. */
static inline struct drm_nouveau_private *
nouveau_private(struct drm_device *dev)
{
	return dev->dev_private;
}

/* Map an embedded TTM bo-device back to the owning device state. */
static inline struct drm_nouveau_private *
nouveau_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct drm_nouveau_private, ttm.bdev);
}
817
818static inline int
819nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
820{
821 struct nouveau_bo *prev;
822
823 if (!pnvbo)
824 return -EINVAL;
825 prev = *pnvbo;
826
827 *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
828 if (prev) {
829 struct ttm_buffer_object *bo = &prev->bo;
830
831 ttm_bo_unref(&bo);
832 }
833
834 return 0;
835}
836
/* nouveau_drv.c */
/* Module options (defined in nouveau_drv.c) and legacy PCI PM hooks. */
extern int nouveau_modeset;		/* -1 = auto, 0 = off, 1 = KMS */
extern int nouveau_agpmode;
extern int nouveau_duallink;
extern int nouveau_uscript_lvds;
extern int nouveau_uscript_tmds;
extern int nouveau_vram_pushbuf;
extern int nouveau_vram_notify;
extern char *nouveau_vram_type;
extern int nouveau_fbpercrtc;
extern int nouveau_tv_disable;
extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern char *nouveau_vbios;
extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
extern int nouveau_force_post;
extern int nouveau_override_conntype;
extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
extern int nouveau_msi;
extern int nouveau_ctxfw;
extern int nouveau_mxmdcb;

extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
864
/* nouveau_state.c */
/* DRM driver callbacks, register-poll helpers and card bring-up. */
extern int nouveau_open(struct drm_device *, struct drm_file *);
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
extern void nouveau_postclose(struct drm_device *, struct drm_file *);
extern int nouveau_load(struct drm_device *, unsigned long flags);
extern int nouveau_firstopen(struct drm_device *);
extern void nouveau_lastclose(struct drm_device *);
extern int nouveau_unload(struct drm_device *);
/* Poll @reg until (value & mask) ==/!= val, or @timeout expires. */
extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
			    uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
			    uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_cb(struct drm_device *, u64 timeout,
			    bool (*cond)(void *), void *);
extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);

/* nouveau_mem.c */
/* VRAM/GART setup, AGP handling and memory-timing calculation. */
extern int nouveau_mem_vram_init(struct drm_device *);
extern void nouveau_mem_vram_fini(struct drm_device *);
extern int nouveau_mem_gart_init(struct drm_device *);
extern void nouveau_mem_gart_fini(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
extern int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
				   struct nouveau_pm_memtiming *);
extern void nouveau_mem_timing_read(struct drm_device *,
				    struct nouveau_pm_memtiming *);
extern int nouveau_mem_vbios_type(struct drm_device *);
extern struct nouveau_tile_reg *nv10_mem_set_tiling(
	struct drm_device *dev, uint32_t addr, uint32_t size,
	uint32_t pitch, uint32_t flags);
extern void nv10_mem_put_tile_region(struct drm_device *dev,
				     struct nouveau_tile_reg *tile,
				     struct nouveau_fence *fence);
extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
904
905/* nouveau_notifier.c */
906extern int nouveau_notifier_init_channel(struct nouveau_channel *);
907extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
908extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
909 int cout, uint32_t start, uint32_t end,
910 uint32_t *offset);
911
912/* nouveau_channel.c */
913extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
914extern int nouveau_channel_alloc(struct drm_device *dev,
915 struct nouveau_channel **chan,
916 struct drm_file *file_priv,
917 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
918extern struct nouveau_channel *
919nouveau_channel_get_unlocked(struct nouveau_channel *);
920extern struct nouveau_channel *
921nouveau_channel_get(struct drm_file *, int id);
922extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
923extern void nouveau_channel_put(struct nouveau_channel **);
924extern void nouveau_channel_ref(struct nouveau_channel *chan,
925 struct nouveau_channel **pchan);
926extern int nouveau_channel_idle(struct nouveau_channel *chan);
927
/* nouveau_gpuobj.c */
/* Register/unregister an execution engine implementation on a device. */
#define NVOBJ_ENGINE_ADD(d, e, p) do {                                         \
	struct drm_nouveau_private *dev_priv = (d)->dev_private;               \
	dev_priv->eng[NVOBJ_ENGINE_##e] = (p);                                 \
} while (0)

#define NVOBJ_ENGINE_DEL(d, e) do {                                            \
	struct drm_nouveau_private *dev_priv = (d)->dev_private;               \
	dev_priv->eng[NVOBJ_ENGINE_##e] = NULL;                                \
} while (0)

/* Register an object class / method handler; both macros propagate
 * failure by returning from the *calling* function — only use inside
 * an int-returning function. */
#define NVOBJ_CLASS(d, c, e) do {                                              \
	int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e);        \
	if (ret)                                                               \
		return ret;                                                    \
} while (0)

#define NVOBJ_MTHD(d, c, m, e) do {                                            \
	int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e));                 \
	if (ret)                                                               \
		return ret;                                                    \
} while (0)

/* GPU-object lifecycle, class/method dispatch and DMA-object helpers. */
extern int nouveau_gpuobj_early_init(struct drm_device *);
extern int nouveau_gpuobj_init(struct drm_device *);
extern void nouveau_gpuobj_takedown(struct drm_device *);
extern int nouveau_gpuobj_suspend(struct drm_device *dev);
extern void nouveau_gpuobj_resume(struct drm_device *dev);
extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
				   int (*exec)(struct nouveau_channel *,
					       u32 class, u32 mthd, u32 data));
extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
				       uint32_t vram_h, uint32_t tt_h);
extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
			      uint32_t size, int align, uint32_t flags,
			      struct nouveau_gpuobj **);
extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
			       struct nouveau_gpuobj **);
extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
				   u32 size, u32 flags,
				   struct nouveau_gpuobj **);
extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
				  uint64_t offset, uint64_t size, int access,
				  int target, struct nouveau_gpuobj **);
extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
			       u64 size, int target, int access, u32 type,
			       u32 comp, struct nouveau_gpuobj **pobj);
extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
				 int class, u64 base, u64 size, int target,
				 int access, u32 type, u32 comp);
983
/* nouveau_irq.c */
/* Interrupt setup and per-status-bit handler registration
 * (handlers land in dev_priv->irq_handler[]). */
extern int nouveau_irq_init(struct drm_device *);
extern void nouveau_irq_fini(struct drm_device *);
extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
extern void nouveau_irq_register(struct drm_device *, int status_bit,
				 void (*)(struct drm_device *));
extern void nouveau_irq_unregister(struct drm_device *, int status_bit);
extern void nouveau_irq_preinstall(struct drm_device *);
extern int nouveau_irq_postinstall(struct drm_device *);
extern void nouveau_irq_uninstall(struct drm_device *);

/* nouveau_sgdma.c */
/* Scatter/gather DMA (PCI GART) TTM backend. */
extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
					   uint32_t offset);
extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
					       unsigned long size,
					       uint32_t page_flags,
					       struct page *dummy_read_page);
1004
/* nouveau_debugfs.c */
/* Debugfs hooks; compiled out (inline no-op stubs) unless
 * CONFIG_DRM_NOUVEAU_DEBUG is set. */
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
extern int nouveau_debugfs_init(struct drm_minor *);
extern void nouveau_debugfs_takedown(struct drm_minor *);
extern int nouveau_debugfs_channel_init(struct nouveau_channel *);
extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
#else
static inline int
nouveau_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
{
}

static inline int
nouveau_debugfs_channel_init(struct nouveau_channel *chan)
{
	return 0;
}

static inline void
nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
{
}
#endif

/* nouveau_dma.c */
/* Push-buffer DMA helpers for command submission. */
extern void nouveau_dma_init(struct nouveau_channel *);
extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
1037
/* nouveau_acpi.c */
/* ACPI integration: Optimus/_DSM switching, VBIOS-via-ACPI fetch and
 * ACPI-provided EDID.  Stubs (failing with -EINVAL / false) when ACPI
 * support is not configured. */
#define ROM_BIOS_PAGE 4096
#if defined(CONFIG_ACPI)
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
int nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; }
#endif

/* nouveau_backlight.c */
/* LVDS backlight control; stubbed out when not configured. */
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
extern int nouveau_backlight_init(struct drm_device *);
extern void nouveau_backlight_exit(struct drm_device *);
#else
static inline int nouveau_backlight_init(struct drm_device *dev)
{
	return 0;
}

static inline void nouveau_backlight_exit(struct drm_device *dev) { }
#endif
1068
/* nouveau_bios.c */
/* VBIOS parsing and script execution (init tables, PLL limits, DCB,
 * LVDS/TMDS output scripts). */
extern int nouveau_bios_init(struct drm_device *);
extern void nouveau_bios_takedown(struct drm_device *dev);
extern int nouveau_run_vbios_init(struct drm_device *);
extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
					struct dcb_entry *, int crtc);
extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
extern struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
extern u32 get_pll_register(struct drm_device *, enum pll_types);
extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
			  struct pll_lims *);
extern int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
					  struct dcb_entry *, int crtc);
extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
					 bool *dl, bool *if_is_24bit);
extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
			  int head, int pxclk);
extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
			    enum LVDS_script, int pxclk);
bool bios_encoder_match(struct dcb_entry *, u32 hash);

/* nouveau_mxm.c */
extern int nouveau_mxm_init(struct drm_device *dev);
extern void nouveau_mxm_fini(struct drm_device *dev);

/* nouveau_ttm.c */
extern int nouveau_ttm_global_init(struct drm_nouveau_private *);
extern void nouveau_ttm_global_release(struct drm_nouveau_private *);
extern int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);

/* nouveau_hdmi.c */
extern void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
1104
1105/* nv04_fb.c */
1106extern int nv04_fb_vram_init(struct drm_device *);
1107extern int nv04_fb_init(struct drm_device *);
1108extern void nv04_fb_takedown(struct drm_device *);
1109
1110/* nv10_fb.c */
1111extern int nv10_fb_vram_init(struct drm_device *dev);
1112extern int nv1a_fb_vram_init(struct drm_device *dev);
1113extern int nv10_fb_init(struct drm_device *);
1114extern void nv10_fb_takedown(struct drm_device *);
1115extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
1116 uint32_t addr, uint32_t size,
1117 uint32_t pitch, uint32_t flags);
1118extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
1119extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
1120
1121/* nv20_fb.c */
1122extern int nv20_fb_vram_init(struct drm_device *dev);
1123extern int nv20_fb_init(struct drm_device *);
1124extern void nv20_fb_takedown(struct drm_device *);
1125extern void nv20_fb_init_tile_region(struct drm_device *dev, int i,
1126 uint32_t addr, uint32_t size,
1127 uint32_t pitch, uint32_t flags);
1128extern void nv20_fb_set_tile_region(struct drm_device *dev, int i);
1129extern void nv20_fb_free_tile_region(struct drm_device *dev, int i);
1130
1131/* nv30_fb.c */
1132extern int nv30_fb_init(struct drm_device *);
1133extern void nv30_fb_takedown(struct drm_device *);
1134extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
1135 uint32_t addr, uint32_t size,
1136 uint32_t pitch, uint32_t flags);
1137extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
1138
1139/* nv40_fb.c */
1140extern int nv40_fb_vram_init(struct drm_device *dev);
1141extern int nv40_fb_init(struct drm_device *);
1142extern void nv40_fb_takedown(struct drm_device *);
1143extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
1144
1145/* nv50_fb.c */
1146extern int nv50_fb_init(struct drm_device *);
1147extern void nv50_fb_takedown(struct drm_device *);
1148extern void nv50_fb_vm_trap(struct drm_device *, int display);
1149
1150/* nvc0_fb.c */
1151extern int nvc0_fb_init(struct drm_device *);
1152extern void nvc0_fb_takedown(struct drm_device *);
1153
1154/* nv04_graph.c */
1155extern int nv04_graph_create(struct drm_device *);
1156extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
1157extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
1158 u32 class, u32 mthd, u32 data);
1159extern struct nouveau_bitfield nv04_graph_nsource[];
1160
1161/* nv10_graph.c */
1162extern int nv10_graph_create(struct drm_device *);
1163extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
1164extern struct nouveau_bitfield nv10_graph_intr[];
1165extern struct nouveau_bitfield nv10_graph_nstatus[];
1166
1167/* nv20_graph.c */
1168extern int nv20_graph_create(struct drm_device *);
1169
1170/* nv40_graph.c */
1171extern int nv40_graph_create(struct drm_device *);
1172extern void nv40_grctx_init(struct drm_device *, u32 *size);
1173extern void nv40_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
1174
1175/* nv50_graph.c */
1176extern int nv50_graph_create(struct drm_device *);
1177extern struct nouveau_enum nv50_data_error_names[];
1178extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
1179extern int nv50_grctx_init(struct drm_device *, u32 *, u32, u32 *, u32 *);
1180extern void nv50_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
1181
1182/* nvc0_graph.c */
1183extern int nvc0_graph_create(struct drm_device *);
1184extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
1185
1186/* nve0_graph.c */
1187extern int nve0_graph_create(struct drm_device *);
1188
1189/* nv84_crypt.c */
1190extern int nv84_crypt_create(struct drm_device *);
1191
1192/* nv98_crypt.c */
1193extern int nv98_crypt_create(struct drm_device *dev);
1194
1195/* nva3_copy.c */
1196extern int nva3_copy_create(struct drm_device *dev);
1197
1198/* nvc0_copy.c */
1199extern int nvc0_copy_create(struct drm_device *dev, int engine);
1200
1201/* nv31_mpeg.c */
1202extern int nv31_mpeg_create(struct drm_device *dev);
1203
1204/* nv50_mpeg.c */
1205extern int nv50_mpeg_create(struct drm_device *dev);
1206
1207/* nv84_bsp.c */
1208/* nv98_bsp.c */
1209extern int nv84_bsp_create(struct drm_device *dev);
1210
1211/* nv84_vp.c */
1212/* nv98_vp.c */
1213extern int nv84_vp_create(struct drm_device *dev);
1214
1215/* nv98_ppp.c */
1216extern int nv98_ppp_create(struct drm_device *dev);
1217
1218/* nv04_instmem.c */
1219extern int nv04_instmem_init(struct drm_device *);
1220extern void nv04_instmem_takedown(struct drm_device *);
1221extern int nv04_instmem_suspend(struct drm_device *);
1222extern void nv04_instmem_resume(struct drm_device *);
1223extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
1224 u32 size, u32 align);
1225extern void nv04_instmem_put(struct nouveau_gpuobj *);
1226extern int nv04_instmem_map(struct nouveau_gpuobj *);
1227extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
1228extern void nv04_instmem_flush(struct drm_device *);
1229
1230/* nv50_instmem.c */
1231extern int nv50_instmem_init(struct drm_device *);
1232extern void nv50_instmem_takedown(struct drm_device *);
1233extern int nv50_instmem_suspend(struct drm_device *);
1234extern void nv50_instmem_resume(struct drm_device *);
1235extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
1236 u32 size, u32 align);
1237extern void nv50_instmem_put(struct nouveau_gpuobj *);
1238extern int nv50_instmem_map(struct nouveau_gpuobj *);
1239extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
1240extern void nv50_instmem_flush(struct drm_device *);
1241extern void nv84_instmem_flush(struct drm_device *);
1242
1243/* nvc0_instmem.c */
1244extern int nvc0_instmem_init(struct drm_device *);
1245extern void nvc0_instmem_takedown(struct drm_device *);
1246extern int nvc0_instmem_suspend(struct drm_device *);
1247extern void nvc0_instmem_resume(struct drm_device *);
1248
1249/* nv04_mc.c */
1250extern int nv04_mc_init(struct drm_device *);
1251extern void nv04_mc_takedown(struct drm_device *);
1252
1253/* nv40_mc.c */
1254extern int nv40_mc_init(struct drm_device *);
1255extern void nv40_mc_takedown(struct drm_device *);
1256
1257/* nv50_mc.c */
1258extern int nv50_mc_init(struct drm_device *);
1259extern void nv50_mc_takedown(struct drm_device *);
1260
1261/* nv04_timer.c */
1262extern int nv04_timer_init(struct drm_device *);
1263extern uint64_t nv04_timer_read(struct drm_device *);
1264extern void nv04_timer_takedown(struct drm_device *);
1265
1266extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
1267 unsigned long arg);
1268
1269/* nv04_dac.c */
1270extern int nv04_dac_create(struct drm_connector *, struct dcb_entry *);
1271extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
1272extern int nv04_dac_output_offset(struct drm_encoder *encoder);
1273extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
1274extern bool nv04_dac_in_use(struct drm_encoder *encoder);
1275
1276/* nv04_dfp.c */
1277extern int nv04_dfp_create(struct drm_connector *, struct dcb_entry *);
1278extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
1279extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
1280 int head, bool dl);
1281extern void nv04_dfp_disable(struct drm_device *dev, int head);
1282extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
1283
1284/* nv04_tv.c */
1285extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
1286extern int nv04_tv_create(struct drm_connector *, struct dcb_entry *);
1287
1288/* nv17_tv.c */
1289extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
1290
1291/* nv04_display.c */
1292extern int nv04_display_early_init(struct drm_device *);
1293extern void nv04_display_late_takedown(struct drm_device *);
1294extern int nv04_display_create(struct drm_device *);
1295extern void nv04_display_destroy(struct drm_device *);
1296extern int nv04_display_init(struct drm_device *);
1297extern void nv04_display_fini(struct drm_device *);
1298
1299/* nvd0_display.c */
1300extern int nvd0_display_create(struct drm_device *);
1301extern void nvd0_display_destroy(struct drm_device *);
1302extern int nvd0_display_init(struct drm_device *);
1303extern void nvd0_display_fini(struct drm_device *);
1304struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
1305void nvd0_display_flip_stop(struct drm_crtc *);
1306int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
1307 struct nouveau_channel *, u32 swap_interval);
1308
1309/* nv04_crtc.c */
1310extern int nv04_crtc_create(struct drm_device *, int index);
1311
1312/* nouveau_bo.c */
1313extern struct ttm_bo_driver nouveau_bo_driver;
1314extern void nouveau_bo_move_init(struct nouveau_channel *);
1315extern int nouveau_bo_new(struct drm_device *, int size, int align,
1316 uint32_t flags, uint32_t tile_mode,
1317 uint32_t tile_flags,
1318 struct sg_table *sg,
1319 struct nouveau_bo **);
1320extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
1321extern int nouveau_bo_unpin(struct nouveau_bo *);
1322extern int nouveau_bo_map(struct nouveau_bo *);
1323extern void nouveau_bo_unmap(struct nouveau_bo *);
1324extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type,
1325 uint32_t busy);
1326extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1327extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1328extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
1329extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
1330extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
1331extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
1332 bool no_wait_reserve, bool no_wait_gpu);
1333
1334extern struct nouveau_vma *
1335nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
1336extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
1337 struct nouveau_vma *);
1338extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
1339
1340/* nouveau_gem.c */
1341extern int nouveau_gem_new(struct drm_device *, int size, int align,
1342 uint32_t domain, uint32_t tile_mode,
1343 uint32_t tile_flags, struct nouveau_bo **);
1344extern int nouveau_gem_object_new(struct drm_gem_object *);
1345extern void nouveau_gem_object_del(struct drm_gem_object *);
1346extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
1347extern void nouveau_gem_object_close(struct drm_gem_object *,
1348 struct drm_file *);
1349extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
1350 struct drm_file *);
1351extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
1352 struct drm_file *);
1353extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
1354 struct drm_file *);
1355extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
1356 struct drm_file *);
1357extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
1358 struct drm_file *);
1359
1360extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
1361 struct drm_gem_object *obj, int flags);
1362extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
1363 struct dma_buf *dma_buf);
1364
1365/* nouveau_display.c */
1366int nouveau_display_create(struct drm_device *dev);
1367void nouveau_display_destroy(struct drm_device *dev);
1368int nouveau_display_init(struct drm_device *dev);
1369void nouveau_display_fini(struct drm_device *dev);
1370int nouveau_vblank_enable(struct drm_device *dev, int crtc);
1371void nouveau_vblank_disable(struct drm_device *dev, int crtc);
1372int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1373 struct drm_pending_vblank_event *event);
1374int nouveau_finish_page_flip(struct nouveau_channel *,
1375 struct nouveau_page_flip_state *);
1376int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
1377 struct drm_mode_create_dumb *args);
1378int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
1379 uint32_t handle, uint64_t *offset);
1380int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
1381 uint32_t handle);
1382
1383/* nv10_gpio.c */
1384int nv10_gpio_init(struct drm_device *dev);
1385void nv10_gpio_fini(struct drm_device *dev);
1386int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
1387int nv10_gpio_sense(struct drm_device *dev, int line);
1388void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
1389
1390/* nv50_gpio.c */
1391int nv50_gpio_init(struct drm_device *dev);
1392void nv50_gpio_fini(struct drm_device *dev);
1393int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
1394int nv50_gpio_sense(struct drm_device *dev, int line);
1395void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
1396int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
1397int nvd0_gpio_sense(struct drm_device *dev, int line);
1398
1399/* nv50_calc.c */
1400int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
1401 int *N1, int *M1, int *N2, int *M2, int *P);
1402int nva3_calc_pll(struct drm_device *, struct pll_lims *,
1403 int clk, int *N, int *fN, int *M, int *P);
1404
1405#ifndef ioread32_native
1406#ifdef __BIG_ENDIAN
1407#define ioread16_native ioread16be
1408#define iowrite16_native iowrite16be
1409#define ioread32_native ioread32be
1410#define iowrite32_native iowrite32be
1411#else /* def __BIG_ENDIAN */
1412#define ioread16_native ioread16
1413#define iowrite16_native iowrite16
1414#define ioread32_native ioread32
1415#define iowrite32_native iowrite32
1416#endif /* def __BIG_ENDIAN else */
1417#endif /* !ioread32_native */
1418
1419/* channel control reg access */
1420static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
1421{
1422 return ioread32_native(chan->user + reg);
1423}
1424
1425static inline void nvchan_wr32(struct nouveau_channel *chan,
1426 unsigned reg, u32 val)
1427{
1428 iowrite32_native(val, chan->user + reg);
1429}
1430
1431/* register access */
1432static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
1433{
1434 struct drm_nouveau_private *dev_priv = dev->dev_private;
1435 return ioread32_native(dev_priv->mmio + reg);
1436}
1437
1438static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
1439{
1440 struct drm_nouveau_private *dev_priv = dev->dev_private;
1441 iowrite32_native(val, dev_priv->mmio + reg);
1442}
1443
1444static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
1445{
1446 u32 tmp = nv_rd32(dev, reg);
1447 nv_wr32(dev, reg, (tmp & ~mask) | val);
1448 return tmp;
1449}
1450
1451static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
1452{
1453 struct drm_nouveau_private *dev_priv = dev->dev_private;
1454 return ioread8(dev_priv->mmio + reg);
1455}
1456
1457static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
1458{
1459 struct drm_nouveau_private *dev_priv = dev->dev_private;
1460 iowrite8(val, dev_priv->mmio + reg);
1461}
1462
1463#define nv_wait(dev, reg, mask, val) \
1464 nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
1465#define nv_wait_ne(dev, reg, mask, val) \
1466 nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
1467#define nv_wait_cb(dev, func, data) \
1468 nouveau_wait_cb(dev, 2000000000ULL, (func), (data))
1469
1470/* PRAMIN access */
1471static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
1472{
1473 struct drm_nouveau_private *dev_priv = dev->dev_private;
1474 return ioread32_native(dev_priv->ramin + offset);
1475}
1476
1477static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
1478{
1479 struct drm_nouveau_private *dev_priv = dev->dev_private;
1480 iowrite32_native(val, dev_priv->ramin + offset);
1481}
1482
1483/* object access */
1484extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
1485extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
1486
1487/*
1488 * Logging
1489 * Argument d is (struct drm_device *).
1490 */
1491#define NV_PRINTK(level, d, fmt, arg...) \
1492 printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
1493 pci_name(d->pdev), ##arg)
1494#ifndef NV_DEBUG_NOTRACE
1495#define NV_DEBUG(d, fmt, arg...) do { \
1496 if (drm_debug & DRM_UT_DRIVER) { \
1497 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
1498 __LINE__, ##arg); \
1499 } \
1500} while (0)
1501#define NV_DEBUG_KMS(d, fmt, arg...) do { \
1502 if (drm_debug & DRM_UT_KMS) { \
1503 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
1504 __LINE__, ##arg); \
1505 } \
1506} while (0)
1507#else
1508#define NV_DEBUG(d, fmt, arg...) do { \
1509 if (drm_debug & DRM_UT_DRIVER) \
1510 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
1511} while (0)
1512#define NV_DEBUG_KMS(d, fmt, arg...) do { \
1513 if (drm_debug & DRM_UT_KMS) \
1514 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
1515} while (0)
1516#endif
1517#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
1518#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
1519#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
1520#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
1521#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
1522#define NV_WARNONCE(d, fmt, arg...) do { \
1523 static int _warned = 0; \
1524 if (!_warned) { \
1525 NV_WARN(d, fmt, ##arg); \
1526 _warned = 1; \
1527 } \
1528} while(0)
1529
1530/* nouveau_reg_debug bitmask */
1531enum {
1532 NOUVEAU_REG_DEBUG_MC = 0x1,
1533 NOUVEAU_REG_DEBUG_VIDEO = 0x2,
1534 NOUVEAU_REG_DEBUG_FB = 0x4,
1535 NOUVEAU_REG_DEBUG_EXTDEV = 0x8,
1536 NOUVEAU_REG_DEBUG_CRTC = 0x10,
1537 NOUVEAU_REG_DEBUG_RAMDAC = 0x20,
1538 NOUVEAU_REG_DEBUG_VGACRTC = 0x40,
1539 NOUVEAU_REG_DEBUG_RMVIO = 0x80,
1540 NOUVEAU_REG_DEBUG_VGAATTR = 0x100,
1541 NOUVEAU_REG_DEBUG_EVO = 0x200,
1542 NOUVEAU_REG_DEBUG_AUXCH = 0x400
1543};
1544
1545#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
1546 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
1547 NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
1548} while (0)
1549
1550static inline bool
1551nv_two_heads(struct drm_device *dev)
1552{
1553 struct drm_nouveau_private *dev_priv = dev->dev_private;
1554 const int impl = dev->pci_device & 0x0ff0;
1555
1556 if (dev_priv->card_type >= NV_10 && impl != 0x0100 &&
1557 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
1558 return true;
1559
1560 return false;
1561}
1562
1563static inline bool
1564nv_gf4_disp_arch(struct drm_device *dev)
1565{
1566 return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
1567}
1568
1569static inline bool
1570nv_two_reg_pll(struct drm_device *dev)
1571{
1572 struct drm_nouveau_private *dev_priv = dev->dev_private;
1573 const int impl = dev->pci_device & 0x0ff0;
1574
1575 if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40)
1576 return true;
1577 return false;
1578}
1579
1580static inline bool
1581nv_match_device(struct drm_device *dev, unsigned device,
1582 unsigned sub_vendor, unsigned sub_device)
1583{
1584 return dev->pdev->device == device &&
1585 dev->pdev->subsystem_vendor == sub_vendor &&
1586 dev->pdev->subsystem_device == sub_device;
1587}
1588
1589static inline void *
1590nv_engine(struct drm_device *dev, int engine)
1591{
1592 struct drm_nouveau_private *dev_priv = dev->dev_private;
1593 return (void *)dev_priv->eng[engine];
1594}
1595
1596/* returns 1 if device is one of the nv4x using the 0x4497 object class,
1597 * helpful to determine a number of other hardware features
1598 */
1599static inline int
1600nv44_graph_class(struct drm_device *dev)
1601{
1602 struct drm_nouveau_private *dev_priv = dev->dev_private;
1603
1604 if ((dev_priv->chipset & 0xf0) == 0x60)
1605 return 1;
1606
1607 return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
1608}
1609
1610/* memory type/access flags, do not match hardware values */
1611#define NV_MEM_ACCESS_RO 1
1612#define NV_MEM_ACCESS_WO 2
1613#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
1614#define NV_MEM_ACCESS_SYS 4
1615#define NV_MEM_ACCESS_VM 8
1616#define NV_MEM_ACCESS_NOSNOOP 16
1617
1618#define NV_MEM_TARGET_VRAM 0
1619#define NV_MEM_TARGET_PCI 1
1620#define NV_MEM_TARGET_PCI_NOSNOOP 2
1621#define NV_MEM_TARGET_VM 3
1622#define NV_MEM_TARGET_GART 4
1623
1624#define NV_MEM_TYPE_VM 0x7f
1625#define NV_MEM_COMP_VM 0x03
1626
1627/* FIFO methods */
1628#define NV01_SUBCHAN_OBJECT 0x00000000
1629#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH 0x00000010
1630#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW 0x00000014
1631#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE 0x00000018
1632#define NV84_SUBCHAN_SEMAPHORE_TRIGGER 0x0000001c
1633#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
1634#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
1635#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
1636#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
1637#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
1638#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
1639#define NV10_SUBCHAN_REF_CNT 0x00000050
1640#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
1641#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
1642#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
1643#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
1644#define NV11_SUBCHAN_SEMAPHORE_RELEASE 0x0000006c
1645#define NV40_SUBCHAN_YIELD 0x00000080
1646
1647/* NV_SW object class */
1648#define NV_SW 0x0000506e
1649#define NV_SW_DMA_VBLSEM 0x0000018c
1650#define NV_SW_VBLSEM_OFFSET 0x00000400
1651#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
1652#define NV_SW_VBLSEM_RELEASE 0x00000408
1653#define NV_SW_PAGE_FLIP 0x00000500
1654
1655#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 3dc14a3dcc4c..5b5d0148f8bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -27,23 +27,27 @@
27#ifndef __NOUVEAU_ENCODER_H__ 27#ifndef __NOUVEAU_ENCODER_H__
28#define __NOUVEAU_ENCODER_H__ 28#define __NOUVEAU_ENCODER_H__
29 29
30#include <subdev/bios/dcb.h>
31
30#include "drm_encoder_slave.h" 32#include "drm_encoder_slave.h"
31#include "nouveau_drv.h" 33#include "nv04_display.h"
32 34
33#define NV_DPMS_CLEARED 0x80 35#define NV_DPMS_CLEARED 0x80
34 36
37struct nouveau_i2c_port;
38
35struct dp_train_func { 39struct dp_train_func {
36 void (*link_set)(struct drm_device *, struct dcb_entry *, int crtc, 40 void (*link_set)(struct drm_device *, struct dcb_output *, int crtc,
37 int nr, u32 bw, bool enhframe); 41 int nr, u32 bw, bool enhframe);
38 void (*train_set)(struct drm_device *, struct dcb_entry *, u8 pattern); 42 void (*train_set)(struct drm_device *, struct dcb_output *, u8 pattern);
39 void (*train_adj)(struct drm_device *, struct dcb_entry *, 43 void (*train_adj)(struct drm_device *, struct dcb_output *,
40 u8 lane, u8 swing, u8 preem); 44 u8 lane, u8 swing, u8 preem);
41}; 45};
42 46
43struct nouveau_encoder { 47struct nouveau_encoder {
44 struct drm_encoder_slave base; 48 struct drm_encoder_slave base;
45 49
46 struct dcb_entry *dcb; 50 struct dcb_output *dcb;
47 int or; 51 int or;
48 52
49 /* different to drm_encoder.crtc, this reflects what's 53 /* different to drm_encoder.crtc, this reflects what's
@@ -87,18 +91,16 @@ get_slave_funcs(struct drm_encoder *enc)
87} 91}
88 92
89/* nouveau_dp.c */ 93/* nouveau_dp.c */
90int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
91 uint8_t *data, int data_nr);
92bool nouveau_dp_detect(struct drm_encoder *); 94bool nouveau_dp_detect(struct drm_encoder *);
93void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate, 95void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
94 struct dp_train_func *); 96 struct dp_train_func *);
95u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **); 97u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
96 98
97struct nouveau_connector * 99struct nouveau_connector *
98nouveau_encoder_connector_get(struct nouveau_encoder *encoder); 100nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
99int nv50_sor_create(struct drm_connector *, struct dcb_entry *); 101int nv50_sor_create(struct drm_connector *, struct dcb_output *);
100void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32); 102void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
101int nv50_dac_create(struct drm_connector *, struct dcb_entry *); 103int nv50_dac_create(struct drm_connector *, struct dcb_output *);
102 104
103 105
104#endif /* __NOUVEAU_ENCODER_H__ */ 106#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
deleted file mode 100644
index f3fb649fe454..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_FB_H__
28#define __NOUVEAU_FB_H__
29
30struct nouveau_framebuffer {
31 struct drm_framebuffer base;
32 struct nouveau_bo *nvbo;
33 struct nouveau_vma vma;
34 u32 r_dma;
35 u32 r_format;
36 u32 r_pitch;
37};
38
39static inline struct nouveau_framebuffer *
40nouveau_framebuffer(struct drm_framebuffer *fb)
41{
42 return container_of(fb, struct nouveau_framebuffer, base);
43}
44
45int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
46 struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
47#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 1074bc5dd418..e75e071845b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -43,19 +43,30 @@
43#include "drm_crtc.h" 43#include "drm_crtc.h"
44#include "drm_crtc_helper.h" 44#include "drm_crtc_helper.h"
45#include "drm_fb_helper.h" 45#include "drm_fb_helper.h"
46#include "nouveau_drv.h" 46
47#include "nouveau_drm.h" 47#include "nouveau_drm.h"
48#include "nouveau_crtc.h" 48#include "nouveau_gem.h"
49#include "nouveau_fb.h" 49#include "nouveau_bo.h"
50#include "nouveau_fbcon.h" 50#include "nouveau_fbcon.h"
51#include "nouveau_dma.h" 51#include "nouveau_chan.h"
52
53#include "nouveau_crtc.h"
54
55#include <core/client.h>
56#include <core/device.h>
57
58#include <subdev/fb.h>
59
60MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
61static int nouveau_nofbaccel = 0;
62module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
52 63
53static void 64static void
54nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 65nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
55{ 66{
56 struct nouveau_fbdev *nfbdev = info->par; 67 struct nouveau_fbdev *fbcon = info->par;
57 struct drm_device *dev = nfbdev->dev; 68 struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
58 struct drm_nouveau_private *dev_priv = dev->dev_private; 69 struct nouveau_device *device = nv_device(drm->device);
59 int ret; 70 int ret;
60 71
61 if (info->state != FBINFO_STATE_RUNNING) 72 if (info->state != FBINFO_STATE_RUNNING)
@@ -63,15 +74,15 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
63 74
64 ret = -ENODEV; 75 ret = -ENODEV;
65 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 76 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
66 mutex_trylock(&dev_priv->channel->mutex)) { 77 mutex_trylock(&drm->client.mutex)) {
67 if (dev_priv->card_type < NV_50) 78 if (device->card_type < NV_50)
68 ret = nv04_fbcon_fillrect(info, rect); 79 ret = nv04_fbcon_fillrect(info, rect);
69 else 80 else
70 if (dev_priv->card_type < NV_C0) 81 if (device->card_type < NV_C0)
71 ret = nv50_fbcon_fillrect(info, rect); 82 ret = nv50_fbcon_fillrect(info, rect);
72 else 83 else
73 ret = nvc0_fbcon_fillrect(info, rect); 84 ret = nvc0_fbcon_fillrect(info, rect);
74 mutex_unlock(&dev_priv->channel->mutex); 85 mutex_unlock(&drm->client.mutex);
75 } 86 }
76 87
77 if (ret == 0) 88 if (ret == 0)
@@ -85,9 +96,9 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
85static void 96static void
86nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) 97nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
87{ 98{
88 struct nouveau_fbdev *nfbdev = info->par; 99 struct nouveau_fbdev *fbcon = info->par;
89 struct drm_device *dev = nfbdev->dev; 100 struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
90 struct drm_nouveau_private *dev_priv = dev->dev_private; 101 struct nouveau_device *device = nv_device(drm->device);
91 int ret; 102 int ret;
92 103
93 if (info->state != FBINFO_STATE_RUNNING) 104 if (info->state != FBINFO_STATE_RUNNING)
@@ -95,15 +106,15 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
95 106
96 ret = -ENODEV; 107 ret = -ENODEV;
97 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 108 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
98 mutex_trylock(&dev_priv->channel->mutex)) { 109 mutex_trylock(&drm->client.mutex)) {
99 if (dev_priv->card_type < NV_50) 110 if (device->card_type < NV_50)
100 ret = nv04_fbcon_copyarea(info, image); 111 ret = nv04_fbcon_copyarea(info, image);
101 else 112 else
102 if (dev_priv->card_type < NV_C0) 113 if (device->card_type < NV_C0)
103 ret = nv50_fbcon_copyarea(info, image); 114 ret = nv50_fbcon_copyarea(info, image);
104 else 115 else
105 ret = nvc0_fbcon_copyarea(info, image); 116 ret = nvc0_fbcon_copyarea(info, image);
106 mutex_unlock(&dev_priv->channel->mutex); 117 mutex_unlock(&drm->client.mutex);
107 } 118 }
108 119
109 if (ret == 0) 120 if (ret == 0)
@@ -117,9 +128,9 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
117static void 128static void
118nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 129nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
119{ 130{
120 struct nouveau_fbdev *nfbdev = info->par; 131 struct nouveau_fbdev *fbcon = info->par;
121 struct drm_device *dev = nfbdev->dev; 132 struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
122 struct drm_nouveau_private *dev_priv = dev->dev_private; 133 struct nouveau_device *device = nv_device(drm->device);
123 int ret; 134 int ret;
124 135
125 if (info->state != FBINFO_STATE_RUNNING) 136 if (info->state != FBINFO_STATE_RUNNING)
@@ -127,15 +138,15 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
127 138
128 ret = -ENODEV; 139 ret = -ENODEV;
129 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 140 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
130 mutex_trylock(&dev_priv->channel->mutex)) { 141 mutex_trylock(&drm->client.mutex)) {
131 if (dev_priv->card_type < NV_50) 142 if (device->card_type < NV_50)
132 ret = nv04_fbcon_imageblit(info, image); 143 ret = nv04_fbcon_imageblit(info, image);
133 else 144 else
134 if (dev_priv->card_type < NV_C0) 145 if (device->card_type < NV_C0)
135 ret = nv50_fbcon_imageblit(info, image); 146 ret = nv50_fbcon_imageblit(info, image);
136 else 147 else
137 ret = nvc0_fbcon_imageblit(info, image); 148 ret = nvc0_fbcon_imageblit(info, image);
138 mutex_unlock(&dev_priv->channel->mutex); 149 mutex_unlock(&drm->client.mutex);
139 } 150 }
140 151
141 if (ret == 0) 152 if (ret == 0)
@@ -149,10 +160,9 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
149static int 160static int
150nouveau_fbcon_sync(struct fb_info *info) 161nouveau_fbcon_sync(struct fb_info *info)
151{ 162{
152 struct nouveau_fbdev *nfbdev = info->par; 163 struct nouveau_fbdev *fbcon = info->par;
153 struct drm_device *dev = nfbdev->dev; 164 struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
154 struct drm_nouveau_private *dev_priv = dev->dev_private; 165 struct nouveau_channel *chan = drm->channel;
155 struct nouveau_channel *chan = dev_priv->channel;
156 int ret; 166 int ret;
157 167
158 if (!chan || !chan->accel_done || in_interrupt() || 168 if (!chan || !chan->accel_done || in_interrupt() ||
@@ -160,11 +170,11 @@ nouveau_fbcon_sync(struct fb_info *info)
160 info->flags & FBINFO_HWACCEL_DISABLED) 170 info->flags & FBINFO_HWACCEL_DISABLED)
161 return 0; 171 return 0;
162 172
163 if (!mutex_trylock(&chan->mutex)) 173 if (!mutex_trylock(&drm->client.mutex))
164 return 0; 174 return 0;
165 175
166 ret = nouveau_channel_idle(chan); 176 ret = nouveau_channel_idle(chan);
167 mutex_unlock(&chan->mutex); 177 mutex_unlock(&drm->client.mutex);
168 if (ret) { 178 if (ret) {
169 nouveau_fbcon_gpu_lockup(info); 179 nouveau_fbcon_gpu_lockup(info);
170 return 0; 180 return 0;
@@ -224,9 +234,9 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
224} 234}
225 235
226static void 236static void
227nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev) 237nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
228{ 238{
229 struct fb_info *info = nfbdev->helper.fbdev; 239 struct fb_info *info = fbcon->helper.fbdev;
230 struct fb_fillrect rect; 240 struct fb_fillrect rect;
231 241
232 /* Clear the entire fbcon. The drm will program every connector 242 /* Clear the entire fbcon. The drm will program every connector
@@ -242,11 +252,12 @@ nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
242} 252}
243 253
244static int 254static int
245nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, 255nouveau_fbcon_create(struct nouveau_fbdev *fbcon,
246 struct drm_fb_helper_surface_size *sizes) 256 struct drm_fb_helper_surface_size *sizes)
247{ 257{
248 struct drm_device *dev = nfbdev->dev; 258 struct drm_device *dev = fbcon->dev;
249 struct drm_nouveau_private *dev_priv = dev->dev_private; 259 struct nouveau_drm *drm = nouveau_drm(dev);
260 struct nouveau_device *device = nv_device(drm->device);
250 struct fb_info *info; 261 struct fb_info *info;
251 struct drm_framebuffer *fb; 262 struct drm_framebuffer *fb;
252 struct nouveau_framebuffer *nouveau_fb; 263 struct nouveau_framebuffer *nouveau_fb;
@@ -254,7 +265,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
254 struct nouveau_bo *nvbo; 265 struct nouveau_bo *nvbo;
255 struct drm_mode_fb_cmd2 mode_cmd; 266 struct drm_mode_fb_cmd2 mode_cmd;
256 struct pci_dev *pdev = dev->pdev; 267 struct pci_dev *pdev = dev->pdev;
257 struct device *device = &pdev->dev;
258 int size, ret; 268 int size, ret;
259 269
260 mode_cmd.width = sizes->surface_width; 270 mode_cmd.width = sizes->surface_width;
@@ -272,37 +282,38 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
272 ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 282 ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
273 0, 0x0000, &nvbo); 283 0, 0x0000, &nvbo);
274 if (ret) { 284 if (ret) {
275 NV_ERROR(dev, "failed to allocate framebuffer\n"); 285 NV_ERROR(drm, "failed to allocate framebuffer\n");
276 goto out; 286 goto out;
277 } 287 }
278 288
279 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM); 289 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
280 if (ret) { 290 if (ret) {
281 NV_ERROR(dev, "failed to pin fb: %d\n", ret); 291 NV_ERROR(drm, "failed to pin fb: %d\n", ret);
282 nouveau_bo_ref(NULL, &nvbo); 292 nouveau_bo_ref(NULL, &nvbo);
283 goto out; 293 goto out;
284 } 294 }
285 295
286 ret = nouveau_bo_map(nvbo); 296 ret = nouveau_bo_map(nvbo);
287 if (ret) { 297 if (ret) {
288 NV_ERROR(dev, "failed to map fb: %d\n", ret); 298 NV_ERROR(drm, "failed to map fb: %d\n", ret);
289 nouveau_bo_unpin(nvbo); 299 nouveau_bo_unpin(nvbo);
290 nouveau_bo_ref(NULL, &nvbo); 300 nouveau_bo_ref(NULL, &nvbo);
291 goto out; 301 goto out;
292 } 302 }
293 303
294 chan = nouveau_nofbaccel ? NULL : dev_priv->channel; 304 chan = nouveau_nofbaccel ? NULL : drm->channel;
295 if (chan && dev_priv->card_type >= NV_50) { 305 if (chan && device->card_type >= NV_50) {
296 ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma); 306 ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm,
307 &fbcon->nouveau_fb.vma);
297 if (ret) { 308 if (ret) {
298 NV_ERROR(dev, "failed to map fb into chan: %d\n", ret); 309 NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
299 chan = NULL; 310 chan = NULL;
300 } 311 }
301 } 312 }
302 313
303 mutex_lock(&dev->struct_mutex); 314 mutex_lock(&dev->struct_mutex);
304 315
305 info = framebuffer_alloc(0, device); 316 info = framebuffer_alloc(0, &pdev->dev);
306 if (!info) { 317 if (!info) {
307 ret = -ENOMEM; 318 ret = -ENOMEM;
308 goto out_unref; 319 goto out_unref;
@@ -314,16 +325,16 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
314 goto out_unref; 325 goto out_unref;
315 } 326 }
316 327
317 info->par = nfbdev; 328 info->par = fbcon;
318 329
319 nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo); 330 nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
320 331
321 nouveau_fb = &nfbdev->nouveau_fb; 332 nouveau_fb = &fbcon->nouveau_fb;
322 fb = &nouveau_fb->base; 333 fb = &nouveau_fb->base;
323 334
324 /* setup helper */ 335 /* setup helper */
325 nfbdev->helper.fb = fb; 336 fbcon->helper.fb = fb;
326 nfbdev->helper.fbdev = info; 337 fbcon->helper.fbdev = info;
327 338
328 strcpy(info->fix.id, "nouveaufb"); 339 strcpy(info->fix.id, "nouveaufb");
329 if (nouveau_nofbaccel) 340 if (nouveau_nofbaccel)
@@ -342,25 +353,18 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
342 info->screen_size = size; 353 info->screen_size = size;
343 354
344 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 355 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
345 drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); 356 drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
346
347 /* Set aperture base/size for vesafb takeover */
348 info->apertures = dev_priv->apertures;
349 if (!info->apertures) {
350 ret = -ENOMEM;
351 goto out_unref;
352 }
353 357
354 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 358 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
355 359
356 mutex_unlock(&dev->struct_mutex); 360 mutex_unlock(&dev->struct_mutex);
357 361
358 if (dev_priv->channel && !nouveau_nofbaccel) { 362 if (chan) {
359 ret = -ENODEV; 363 ret = -ENODEV;
360 if (dev_priv->card_type < NV_50) 364 if (device->card_type < NV_50)
361 ret = nv04_fbcon_accel_init(info); 365 ret = nv04_fbcon_accel_init(info);
362 else 366 else
363 if (dev_priv->card_type < NV_C0) 367 if (device->card_type < NV_C0)
364 ret = nv50_fbcon_accel_init(info); 368 ret = nv50_fbcon_accel_init(info);
365 else 369 else
366 ret = nvc0_fbcon_accel_init(info); 370 ret = nvc0_fbcon_accel_init(info);
@@ -369,13 +373,12 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
369 info->fbops = &nouveau_fbcon_ops; 373 info->fbops = &nouveau_fbcon_ops;
370 } 374 }
371 375
372 nouveau_fbcon_zfill(dev, nfbdev); 376 nouveau_fbcon_zfill(dev, fbcon);
373 377
374 /* To allow resizeing without swapping buffers */ 378 /* To allow resizeing without swapping buffers */
375 NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n", 379 NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
376 nouveau_fb->base.width, 380 nouveau_fb->base.width, nouveau_fb->base.height,
377 nouveau_fb->base.height, 381 nvbo->bo.offset, nvbo);
378 nvbo->bo.offset, nvbo);
379 382
380 vga_switcheroo_client_fb_set(dev->pdev, info); 383 vga_switcheroo_client_fb_set(dev->pdev, info);
381 return 0; 384 return 0;
@@ -390,12 +393,12 @@ static int
390nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper, 393nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
391 struct drm_fb_helper_surface_size *sizes) 394 struct drm_fb_helper_surface_size *sizes)
392{ 395{
393 struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper; 396 struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
394 int new_fb = 0; 397 int new_fb = 0;
395 int ret; 398 int ret;
396 399
397 if (!helper->fb) { 400 if (!helper->fb) {
398 ret = nouveau_fbcon_create(nfbdev, sizes); 401 ret = nouveau_fbcon_create(fbcon, sizes);
399 if (ret) 402 if (ret)
400 return ret; 403 return ret;
401 new_fb = 1; 404 new_fb = 1;
@@ -406,18 +409,18 @@ nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
406void 409void
407nouveau_fbcon_output_poll_changed(struct drm_device *dev) 410nouveau_fbcon_output_poll_changed(struct drm_device *dev)
408{ 411{
409 struct drm_nouveau_private *dev_priv = dev->dev_private; 412 struct nouveau_drm *drm = nouveau_drm(dev);
410 drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); 413 drm_fb_helper_hotplug_event(&drm->fbcon->helper);
411} 414}
412 415
413static int 416static int
414nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) 417nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
415{ 418{
416 struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb; 419 struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
417 struct fb_info *info; 420 struct fb_info *info;
418 421
419 if (nfbdev->helper.fbdev) { 422 if (fbcon->helper.fbdev) {
420 info = nfbdev->helper.fbdev; 423 info = fbcon->helper.fbdev;
421 unregister_framebuffer(info); 424 unregister_framebuffer(info);
422 if (info->cmap.len) 425 if (info->cmap.len)
423 fb_dealloc_cmap(&info->cmap); 426 fb_dealloc_cmap(&info->cmap);
@@ -430,17 +433,17 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
430 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 433 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
431 nouveau_fb->nvbo = NULL; 434 nouveau_fb->nvbo = NULL;
432 } 435 }
433 drm_fb_helper_fini(&nfbdev->helper); 436 drm_fb_helper_fini(&fbcon->helper);
434 drm_framebuffer_cleanup(&nouveau_fb->base); 437 drm_framebuffer_cleanup(&nouveau_fb->base);
435 return 0; 438 return 0;
436} 439}
437 440
438void nouveau_fbcon_gpu_lockup(struct fb_info *info) 441void nouveau_fbcon_gpu_lockup(struct fb_info *info)
439{ 442{
440 struct nouveau_fbdev *nfbdev = info->par; 443 struct nouveau_fbdev *fbcon = info->par;
441 struct drm_device *dev = nfbdev->dev; 444 struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
442 445
443 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 446 NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
444 info->flags |= FBINFO_HWACCEL_DISABLED; 447 info->flags |= FBINFO_HWACCEL_DISABLED;
445} 448}
446 449
@@ -451,74 +454,81 @@ static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
451}; 454};
452 455
453 456
454int nouveau_fbcon_init(struct drm_device *dev) 457int
458nouveau_fbcon_init(struct drm_device *dev)
455{ 459{
456 struct drm_nouveau_private *dev_priv = dev->dev_private; 460 struct nouveau_drm *drm = nouveau_drm(dev);
457 struct nouveau_fbdev *nfbdev; 461 struct nouveau_fb *pfb = nouveau_fb(drm->device);
462 struct nouveau_fbdev *fbcon;
458 int preferred_bpp; 463 int preferred_bpp;
459 int ret; 464 int ret;
460 465
461 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 466 if (!dev->mode_config.num_crtc)
462 if (!nfbdev) 467 return 0;
468
469 fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
470 if (!fbcon)
463 return -ENOMEM; 471 return -ENOMEM;
464 472
465 nfbdev->dev = dev; 473 fbcon->dev = dev;
466 dev_priv->nfbdev = nfbdev; 474 drm->fbcon = fbcon;
467 nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; 475 fbcon->helper.funcs = &nouveau_fbcon_helper_funcs;
468 476
469 ret = drm_fb_helper_init(dev, &nfbdev->helper, 477 ret = drm_fb_helper_init(dev, &fbcon->helper,
470 dev->mode_config.num_crtc, 4); 478 dev->mode_config.num_crtc, 4);
471 if (ret) { 479 if (ret) {
472 kfree(nfbdev); 480 kfree(fbcon);
473 return ret; 481 return ret;
474 } 482 }
475 483
476 drm_fb_helper_single_add_all_connectors(&nfbdev->helper); 484 drm_fb_helper_single_add_all_connectors(&fbcon->helper);
477 485
478 if (dev_priv->vram_size <= 32 * 1024 * 1024) 486 if (pfb->ram.size <= 32 * 1024 * 1024)
479 preferred_bpp = 8; 487 preferred_bpp = 8;
480 else if (dev_priv->vram_size <= 64 * 1024 * 1024) 488 else
489 if (pfb->ram.size <= 64 * 1024 * 1024)
481 preferred_bpp = 16; 490 preferred_bpp = 16;
482 else 491 else
483 preferred_bpp = 32; 492 preferred_bpp = 32;
484 493
485 drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp); 494 drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
486 return 0; 495 return 0;
487} 496}
488 497
489void nouveau_fbcon_fini(struct drm_device *dev) 498void
499nouveau_fbcon_fini(struct drm_device *dev)
490{ 500{
491 struct drm_nouveau_private *dev_priv = dev->dev_private; 501 struct nouveau_drm *drm = nouveau_drm(dev);
492 502
493 if (!dev_priv->nfbdev) 503 if (!drm->fbcon)
494 return; 504 return;
495 505
496 nouveau_fbcon_destroy(dev, dev_priv->nfbdev); 506 nouveau_fbcon_destroy(dev, drm->fbcon);
497 kfree(dev_priv->nfbdev); 507 kfree(drm->fbcon);
498 dev_priv->nfbdev = NULL; 508 drm->fbcon = NULL;
499} 509}
500 510
501void nouveau_fbcon_save_disable_accel(struct drm_device *dev) 511void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
502{ 512{
503 struct drm_nouveau_private *dev_priv = dev->dev_private; 513 struct nouveau_drm *drm = nouveau_drm(dev);
504 514
505 dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags; 515 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
506 dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; 516 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
507} 517}
508 518
509void nouveau_fbcon_restore_accel(struct drm_device *dev) 519void nouveau_fbcon_restore_accel(struct drm_device *dev)
510{ 520{
511 struct drm_nouveau_private *dev_priv = dev->dev_private; 521 struct nouveau_drm *drm = nouveau_drm(dev);
512 dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags; 522 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
513} 523}
514 524
515void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) 525void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
516{ 526{
517 struct drm_nouveau_private *dev_priv = dev->dev_private; 527 struct nouveau_drm *drm = nouveau_drm(dev);
518 console_lock(); 528 console_lock();
519 if (state == 0) 529 if (state == 0)
520 nouveau_fbcon_save_disable_accel(dev); 530 nouveau_fbcon_save_disable_accel(dev);
521 fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state); 531 fb_set_suspend(drm->fbcon->helper.fbdev, state);
522 if (state == 1) 532 if (state == 1)
523 nouveau_fbcon_restore_accel(dev); 533 nouveau_fbcon_restore_accel(dev);
524 console_unlock(); 534 console_unlock();
@@ -526,6 +536,6 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
526 536
527void nouveau_fbcon_zfill_all(struct drm_device *dev) 537void nouveau_fbcon_zfill_all(struct drm_device *dev)
528{ 538{
529 struct drm_nouveau_private *dev_priv = dev->dev_private; 539 struct nouveau_drm *drm = nouveau_drm(dev);
530 nouveau_fbcon_zfill(dev, dev_priv->nfbdev); 540 nouveau_fbcon_zfill(dev, drm->fbcon);
531} 541}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index b73c29f87fc3..18e028008225 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,7 +29,8 @@
29 29
30#include "drm_fb_helper.h" 30#include "drm_fb_helper.h"
31 31
32#include "nouveau_fb.h" 32#include "nouveau_display.h"
33
33struct nouveau_fbdev { 34struct nouveau_fbdev {
34 struct drm_fb_helper helper; 35 struct drm_fb_helper helper;
35 struct nouveau_framebuffer nouveau_fb; 36 struct nouveau_framebuffer nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 3c180493dab8..5b5471ba6eda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -30,11 +30,9 @@
30#include <linux/ktime.h> 30#include <linux/ktime.h>
31#include <linux/hrtimer.h> 31#include <linux/hrtimer.h>
32 32
33#include "nouveau_drv.h" 33#include "nouveau_drm.h"
34#include "nouveau_ramht.h"
35#include "nouveau_fence.h"
36#include "nouveau_software.h"
37#include "nouveau_dma.h" 34#include "nouveau_dma.h"
35#include "nouveau_fence.h"
38 36
39void 37void
40nouveau_fence_context_del(struct nouveau_fence_chan *fctx) 38nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
@@ -54,16 +52,16 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
54void 52void
55nouveau_fence_context_new(struct nouveau_fence_chan *fctx) 53nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
56{ 54{
55 INIT_LIST_HEAD(&fctx->flip);
57 INIT_LIST_HEAD(&fctx->pending); 56 INIT_LIST_HEAD(&fctx->pending);
58 spin_lock_init(&fctx->lock); 57 spin_lock_init(&fctx->lock);
59} 58}
60 59
61void 60static void
62nouveau_fence_update(struct nouveau_channel *chan) 61nouveau_fence_update(struct nouveau_channel *chan)
63{ 62{
64 struct drm_device *dev = chan->dev; 63 struct nouveau_fence_priv *priv = chan->drm->fence;
65 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE); 64 struct nouveau_fence_chan *fctx = chan->fence;
66 struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
67 struct nouveau_fence *fence, *fnext; 65 struct nouveau_fence *fence, *fnext;
68 66
69 spin_lock(&fctx->lock); 67 spin_lock(&fctx->lock);
@@ -83,9 +81,8 @@ nouveau_fence_update(struct nouveau_channel *chan)
83int 81int
84nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) 82nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
85{ 83{
86 struct drm_device *dev = chan->dev; 84 struct nouveau_fence_priv *priv = chan->drm->fence;
87 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE); 85 struct nouveau_fence_chan *fctx = chan->fence;
88 struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
89 int ret; 86 int ret;
90 87
91 fence->channel = chan; 88 fence->channel = chan;
@@ -147,19 +144,17 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
147int 144int
148nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan) 145nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
149{ 146{
150 struct drm_device *dev = chan->dev; 147 struct nouveau_fence_priv *priv = chan->drm->fence;
151 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
152 struct nouveau_channel *prev; 148 struct nouveau_channel *prev;
153 int ret = 0; 149 int ret = 0;
154 150
155 prev = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL; 151 prev = fence ? fence->channel : NULL;
156 if (prev) { 152 if (prev) {
157 if (unlikely(prev != chan && !nouveau_fence_done(fence))) { 153 if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
158 ret = priv->sync(fence, prev, chan); 154 ret = priv->sync(fence, prev, chan);
159 if (unlikely(ret)) 155 if (unlikely(ret))
160 ret = nouveau_fence_wait(fence, true, false); 156 ret = nouveau_fence_wait(fence, true, false);
161 } 157 }
162 nouveau_channel_put_unlocked(&prev);
163 } 158 }
164 159
165 return ret; 160 return ret;
@@ -193,7 +188,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
193 struct nouveau_fence *fence; 188 struct nouveau_fence *fence;
194 int ret = 0; 189 int ret = 0;
195 190
196 if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE])) 191 if (unlikely(!chan->fence))
197 return -ENODEV; 192 return -ENODEV;
198 193
199 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 194 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 82ba733393ae..bedafd1c9539 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,6 +1,8 @@
1#ifndef __NOUVEAU_FENCE_H__ 1#ifndef __NOUVEAU_FENCE_H__
2#define __NOUVEAU_FENCE_H__ 2#define __NOUVEAU_FENCE_H__
3 3
4struct nouveau_drm;
5
4struct nouveau_fence { 6struct nouveau_fence {
5 struct list_head head; 7 struct list_head head;
6 struct kref kref; 8 struct kref kref;
@@ -22,31 +24,48 @@ int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
22bool nouveau_fence_done(struct nouveau_fence *); 24bool nouveau_fence_done(struct nouveau_fence *);
23int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); 25int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
24int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); 26int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
25void nouveau_fence_idle(struct nouveau_channel *);
26void nouveau_fence_update(struct nouveau_channel *);
27 27
28struct nouveau_fence_chan { 28struct nouveau_fence_chan {
29 struct list_head pending; 29 struct list_head pending;
30 struct list_head flip;
31
30 spinlock_t lock; 32 spinlock_t lock;
31 u32 sequence; 33 u32 sequence;
32}; 34};
33 35
34struct nouveau_fence_priv { 36struct nouveau_fence_priv {
35 struct nouveau_exec_engine engine; 37 void (*dtor)(struct nouveau_drm *);
36 int (*emit)(struct nouveau_fence *); 38 bool (*suspend)(struct nouveau_drm *);
37 int (*sync)(struct nouveau_fence *, struct nouveau_channel *, 39 void (*resume)(struct nouveau_drm *);
38 struct nouveau_channel *); 40 int (*context_new)(struct nouveau_channel *);
39 u32 (*read)(struct nouveau_channel *); 41 void (*context_del)(struct nouveau_channel *);
42 int (*emit)(struct nouveau_fence *);
43 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
44 struct nouveau_channel *);
45 u32 (*read)(struct nouveau_channel *);
40}; 46};
41 47
48#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
49
42void nouveau_fence_context_new(struct nouveau_fence_chan *); 50void nouveau_fence_context_new(struct nouveau_fence_chan *);
43void nouveau_fence_context_del(struct nouveau_fence_chan *); 51void nouveau_fence_context_del(struct nouveau_fence_chan *);
44 52
45int nv04_fence_create(struct drm_device *dev); 53int nv04_fence_create(struct nouveau_drm *);
46int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32); 54int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
47 55
48int nv10_fence_create(struct drm_device *dev); 56int nv10_fence_emit(struct nouveau_fence *);
49int nv84_fence_create(struct drm_device *dev); 57int nv17_fence_sync(struct nouveau_fence *, struct nouveau_channel *,
50int nvc0_fence_create(struct drm_device *dev); 58 struct nouveau_channel *);
59u32 nv10_fence_read(struct nouveau_channel *);
60void nv10_fence_context_del(struct nouveau_channel *);
61void nv10_fence_destroy(struct nouveau_drm *);
62int nv10_fence_create(struct nouveau_drm *);
63
64int nv50_fence_create(struct nouveau_drm *);
65int nv84_fence_create(struct nouveau_drm *);
66int nvc0_fence_create(struct nouveau_drm *);
67u64 nvc0_fence_crtc(struct nouveau_channel *, int crtc);
68
69int nouveau_flip_complete(void *chan);
51 70
52#endif 71#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.h b/drivers/gpu/drm/nouveau/nouveau_fifo.h
deleted file mode 100644
index ce99cab2f257..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_fifo.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef __NOUVEAU_FIFO_H__
2#define __NOUVEAU_FIFO_H__
3
4struct nouveau_fifo_priv {
5 struct nouveau_exec_engine base;
6 u32 channels;
7};
8
9struct nouveau_fifo_chan {
10};
11
12bool nv04_fifo_cache_pull(struct drm_device *, bool);
13void nv04_fifo_context_del(struct nouveau_channel *, int);
14int nv04_fifo_fini(struct drm_device *, int, bool);
15int nv04_fifo_init(struct drm_device *, int);
16void nv04_fifo_isr(struct drm_device *);
17void nv04_fifo_destroy(struct drm_device *, int);
18
19void nv50_fifo_playlist_update(struct drm_device *);
20void nv50_fifo_destroy(struct drm_device *, int);
21void nv50_fifo_tlb_flush(struct drm_device *, int);
22
23int nv04_fifo_create(struct drm_device *);
24int nv10_fifo_create(struct drm_device *);
25int nv17_fifo_create(struct drm_device *);
26int nv40_fifo_create(struct drm_device *);
27int nv50_fifo_create(struct drm_device *);
28int nv84_fifo_create(struct drm_device *);
29int nvc0_fifo_create(struct drm_device *);
30int nve0_fifo_create(struct drm_device *);
31
32#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index af7cfb825716..6454370e78cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -23,16 +23,19 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26
26#include <linux/dma-buf.h> 27#include <linux/dma-buf.h>
27#include "drmP.h" 28#include <nouveau_drm.h>
28#include "drm.h" 29
30#include <subdev/fb.h>
29 31
30#include "nouveau_drv.h"
31#include "nouveau_drm.h" 32#include "nouveau_drm.h"
32#include "nouveau_dma.h" 33#include "nouveau_dma.h"
33#include "nouveau_fence.h" 34#include "nouveau_fence.h"
35#include "nouveau_abi16.h"
34 36
35#define nouveau_gem_pushbuf_sync(chan) 0 37#include "nouveau_ttm.h"
38#include "nouveau_gem.h"
36 39
37int 40int
38nouveau_gem_object_new(struct drm_gem_object *gem) 41nouveau_gem_object_new(struct drm_gem_object *gem)
@@ -67,19 +70,19 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
67int 70int
68nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) 71nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
69{ 72{
70 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); 73 struct nouveau_cli *cli = nouveau_cli(file_priv);
71 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 74 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
72 struct nouveau_vma *vma; 75 struct nouveau_vma *vma;
73 int ret; 76 int ret;
74 77
75 if (!fpriv->vm) 78 if (!cli->base.vm)
76 return 0; 79 return 0;
77 80
78 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); 81 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
79 if (ret) 82 if (ret)
80 return ret; 83 return ret;
81 84
82 vma = nouveau_bo_vma_find(nvbo, fpriv->vm); 85 vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
83 if (!vma) { 86 if (!vma) {
84 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 87 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
85 if (!vma) { 88 if (!vma) {
@@ -87,7 +90,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
87 goto out; 90 goto out;
88 } 91 }
89 92
90 ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma); 93 ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
91 if (ret) { 94 if (ret) {
92 kfree(vma); 95 kfree(vma);
93 goto out; 96 goto out;
@@ -104,19 +107,19 @@ out:
104void 107void
105nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) 108nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
106{ 109{
107 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); 110 struct nouveau_cli *cli = nouveau_cli(file_priv);
108 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 111 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
109 struct nouveau_vma *vma; 112 struct nouveau_vma *vma;
110 int ret; 113 int ret;
111 114
112 if (!fpriv->vm) 115 if (!cli->base.vm)
113 return; 116 return;
114 117
115 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); 118 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
116 if (ret) 119 if (ret)
117 return; 120 return;
118 121
119 vma = nouveau_bo_vma_find(nvbo, fpriv->vm); 122 vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
120 if (vma) { 123 if (vma) {
121 if (--vma->refcount == 0) { 124 if (--vma->refcount == 0) {
122 nouveau_bo_vma_del(nvbo, vma); 125 nouveau_bo_vma_del(nvbo, vma);
@@ -131,7 +134,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
131 uint32_t tile_mode, uint32_t tile_flags, 134 uint32_t tile_mode, uint32_t tile_flags,
132 struct nouveau_bo **pnvbo) 135 struct nouveau_bo **pnvbo)
133{ 136{
134 struct drm_nouveau_private *dev_priv = dev->dev_private; 137 struct nouveau_drm *drm = nouveau_drm(dev);
135 struct nouveau_bo *nvbo; 138 struct nouveau_bo *nvbo;
136 u32 flags = 0; 139 u32 flags = 0;
137 int ret; 140 int ret;
@@ -155,7 +158,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
155 */ 158 */
156 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | 159 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
157 NOUVEAU_GEM_DOMAIN_GART; 160 NOUVEAU_GEM_DOMAIN_GART;
158 if (dev_priv->card_type >= NV_50) 161 if (nv_device(drm->device)->card_type >= NV_50)
159 nvbo->valid_domains &= domain; 162 nvbo->valid_domains &= domain;
160 163
161 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); 164 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
@@ -173,7 +176,7 @@ static int
173nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, 176nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
174 struct drm_nouveau_gem_info *rep) 177 struct drm_nouveau_gem_info *rep)
175{ 178{
176 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); 179 struct nouveau_cli *cli = nouveau_cli(file_priv);
177 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 180 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
178 struct nouveau_vma *vma; 181 struct nouveau_vma *vma;
179 182
@@ -183,8 +186,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
183 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; 186 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
184 187
185 rep->offset = nvbo->bo.offset; 188 rep->offset = nvbo->bo.offset;
186 if (fpriv->vm) { 189 if (cli->base.vm) {
187 vma = nouveau_bo_vma_find(nvbo, fpriv->vm); 190 vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
188 if (!vma) 191 if (!vma)
189 return -EINVAL; 192 return -EINVAL;
190 193
@@ -202,15 +205,16 @@ int
202nouveau_gem_ioctl_new(struct drm_device *dev, void *data, 205nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
203 struct drm_file *file_priv) 206 struct drm_file *file_priv)
204{ 207{
205 struct drm_nouveau_private *dev_priv = dev->dev_private; 208 struct nouveau_drm *drm = nouveau_drm(dev);
209 struct nouveau_fb *pfb = nouveau_fb(drm->device);
206 struct drm_nouveau_gem_new *req = data; 210 struct drm_nouveau_gem_new *req = data;
207 struct nouveau_bo *nvbo = NULL; 211 struct nouveau_bo *nvbo = NULL;
208 int ret = 0; 212 int ret = 0;
209 213
210 dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping; 214 drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
211 215
212 if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) { 216 if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
213 NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags); 217 NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
214 return -EINVAL; 218 return -EINVAL;
215 } 219 }
216 220
@@ -312,16 +316,16 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
312 struct drm_nouveau_gem_pushbuf_bo *pbbo, 316 struct drm_nouveau_gem_pushbuf_bo *pbbo,
313 int nr_buffers, struct validate_op *op) 317 int nr_buffers, struct validate_op *op)
314{ 318{
315 struct drm_device *dev = chan->dev; 319 struct drm_device *dev = chan->drm->dev;
316 struct drm_nouveau_private *dev_priv = dev->dev_private; 320 struct nouveau_drm *drm = nouveau_drm(dev);
317 uint32_t sequence; 321 uint32_t sequence;
318 int trycnt = 0; 322 int trycnt = 0;
319 int ret, i; 323 int ret, i;
320 324
321 sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence); 325 sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
322retry: 326retry:
323 if (++trycnt > 100000) { 327 if (++trycnt > 100000) {
324 NV_ERROR(dev, "%s failed and gave up.\n", __func__); 328 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
325 return -EINVAL; 329 return -EINVAL;
326 } 330 }
327 331
@@ -332,14 +336,14 @@ retry:
332 336
333 gem = drm_gem_object_lookup(dev, file_priv, b->handle); 337 gem = drm_gem_object_lookup(dev, file_priv, b->handle);
334 if (!gem) { 338 if (!gem) {
335 NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle); 339 NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
336 validate_fini(op, NULL); 340 validate_fini(op, NULL);
337 return -ENOENT; 341 return -ENOENT;
338 } 342 }
339 nvbo = gem->driver_private; 343 nvbo = gem->driver_private;
340 344
341 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { 345 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
342 NV_ERROR(dev, "multiple instances of buffer %d on " 346 NV_ERROR(drm, "multiple instances of buffer %d on "
343 "validation list\n", b->handle); 347 "validation list\n", b->handle);
344 drm_gem_object_unreference_unlocked(gem); 348 drm_gem_object_unreference_unlocked(gem);
345 validate_fini(op, NULL); 349 validate_fini(op, NULL);
@@ -354,7 +358,7 @@ retry:
354 drm_gem_object_unreference_unlocked(gem); 358 drm_gem_object_unreference_unlocked(gem);
355 if (unlikely(ret)) { 359 if (unlikely(ret)) {
356 if (ret != -ERESTARTSYS) 360 if (ret != -ERESTARTSYS)
357 NV_ERROR(dev, "fail reserve\n"); 361 NV_ERROR(drm, "fail reserve\n");
358 return ret; 362 return ret;
359 } 363 }
360 goto retry; 364 goto retry;
@@ -373,7 +377,7 @@ retry:
373 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) 377 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
374 list_add_tail(&nvbo->entry, &op->gart_list); 378 list_add_tail(&nvbo->entry, &op->gart_list);
375 else { 379 else {
376 NV_ERROR(dev, "invalid valid domains: 0x%08x\n", 380 NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
377 b->valid_domains); 381 b->valid_domains);
378 list_add_tail(&nvbo->entry, &op->both_list); 382 list_add_tail(&nvbo->entry, &op->both_list);
379 validate_fini(op, NULL); 383 validate_fini(op, NULL);
@@ -407,10 +411,9 @@ static int
407validate_list(struct nouveau_channel *chan, struct list_head *list, 411validate_list(struct nouveau_channel *chan, struct list_head *list,
408 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) 412 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
409{ 413{
410 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 414 struct nouveau_drm *drm = chan->drm;
411 struct drm_nouveau_gem_pushbuf_bo __user *upbbo = 415 struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
412 (void __force __user *)(uintptr_t)user_pbbo_ptr; 416 (void __force __user *)(uintptr_t)user_pbbo_ptr;
413 struct drm_device *dev = chan->dev;
414 struct nouveau_bo *nvbo; 417 struct nouveau_bo *nvbo;
415 int ret, relocs = 0; 418 int ret, relocs = 0;
416 419
@@ -419,7 +422,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
419 422
420 ret = validate_sync(chan, nvbo); 423 ret = validate_sync(chan, nvbo);
421 if (unlikely(ret)) { 424 if (unlikely(ret)) {
422 NV_ERROR(dev, "fail pre-validate sync\n"); 425 NV_ERROR(drm, "fail pre-validate sync\n");
423 return ret; 426 return ret;
424 } 427 }
425 428
@@ -427,24 +430,24 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
427 b->write_domains, 430 b->write_domains,
428 b->valid_domains); 431 b->valid_domains);
429 if (unlikely(ret)) { 432 if (unlikely(ret)) {
430 NV_ERROR(dev, "fail set_domain\n"); 433 NV_ERROR(drm, "fail set_domain\n");
431 return ret; 434 return ret;
432 } 435 }
433 436
434 ret = nouveau_bo_validate(nvbo, true, false, false); 437 ret = nouveau_bo_validate(nvbo, true, false, false);
435 if (unlikely(ret)) { 438 if (unlikely(ret)) {
436 if (ret != -ERESTARTSYS) 439 if (ret != -ERESTARTSYS)
437 NV_ERROR(dev, "fail ttm_validate\n"); 440 NV_ERROR(drm, "fail ttm_validate\n");
438 return ret; 441 return ret;
439 } 442 }
440 443
441 ret = validate_sync(chan, nvbo); 444 ret = validate_sync(chan, nvbo);
442 if (unlikely(ret)) { 445 if (unlikely(ret)) {
443 NV_ERROR(dev, "fail post-validate sync\n"); 446 NV_ERROR(drm, "fail post-validate sync\n");
444 return ret; 447 return ret;
445 } 448 }
446 449
447 if (dev_priv->card_type < NV_50) { 450 if (nv_device(drm->device)->card_type < NV_50) {
448 if (nvbo->bo.offset == b->presumed.offset && 451 if (nvbo->bo.offset == b->presumed.offset &&
449 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 452 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
450 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || 453 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -476,7 +479,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
476 uint64_t user_buffers, int nr_buffers, 479 uint64_t user_buffers, int nr_buffers,
477 struct validate_op *op, int *apply_relocs) 480 struct validate_op *op, int *apply_relocs)
478{ 481{
479 struct drm_device *dev = chan->dev; 482 struct nouveau_drm *drm = chan->drm;
480 int ret, relocs = 0; 483 int ret, relocs = 0;
481 484
482 INIT_LIST_HEAD(&op->vram_list); 485 INIT_LIST_HEAD(&op->vram_list);
@@ -489,14 +492,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
489 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); 492 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
490 if (unlikely(ret)) { 493 if (unlikely(ret)) {
491 if (ret != -ERESTARTSYS) 494 if (ret != -ERESTARTSYS)
492 NV_ERROR(dev, "validate_init\n"); 495 NV_ERROR(drm, "validate_init\n");
493 return ret; 496 return ret;
494 } 497 }
495 498
496 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); 499 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
497 if (unlikely(ret < 0)) { 500 if (unlikely(ret < 0)) {
498 if (ret != -ERESTARTSYS) 501 if (ret != -ERESTARTSYS)
499 NV_ERROR(dev, "validate vram_list\n"); 502 NV_ERROR(drm, "validate vram_list\n");
500 validate_fini(op, NULL); 503 validate_fini(op, NULL);
501 return ret; 504 return ret;
502 } 505 }
@@ -505,7 +508,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
505 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); 508 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
506 if (unlikely(ret < 0)) { 509 if (unlikely(ret < 0)) {
507 if (ret != -ERESTARTSYS) 510 if (ret != -ERESTARTSYS)
508 NV_ERROR(dev, "validate gart_list\n"); 511 NV_ERROR(drm, "validate gart_list\n");
509 validate_fini(op, NULL); 512 validate_fini(op, NULL);
510 return ret; 513 return ret;
511 } 514 }
@@ -514,7 +517,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
514 ret = validate_list(chan, &op->both_list, pbbo, user_buffers); 517 ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
515 if (unlikely(ret < 0)) { 518 if (unlikely(ret < 0)) {
516 if (ret != -ERESTARTSYS) 519 if (ret != -ERESTARTSYS)
517 NV_ERROR(dev, "validate both_list\n"); 520 NV_ERROR(drm, "validate both_list\n");
518 validate_fini(op, NULL); 521 validate_fini(op, NULL);
519 return ret; 522 return ret;
520 } 523 }
@@ -547,6 +550,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
547 struct drm_nouveau_gem_pushbuf *req, 550 struct drm_nouveau_gem_pushbuf *req,
548 struct drm_nouveau_gem_pushbuf_bo *bo) 551 struct drm_nouveau_gem_pushbuf_bo *bo)
549{ 552{
553 struct nouveau_drm *drm = nouveau_drm(dev);
550 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; 554 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
551 int ret = 0; 555 int ret = 0;
552 unsigned i; 556 unsigned i;
@@ -562,7 +566,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
562 uint32_t data; 566 uint32_t data;
563 567
564 if (unlikely(r->bo_index > req->nr_buffers)) { 568 if (unlikely(r->bo_index > req->nr_buffers)) {
565 NV_ERROR(dev, "reloc bo index invalid\n"); 569 NV_ERROR(drm, "reloc bo index invalid\n");
566 ret = -EINVAL; 570 ret = -EINVAL;
567 break; 571 break;
568 } 572 }
@@ -572,7 +576,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
572 continue; 576 continue;
573 577
574 if (unlikely(r->reloc_bo_index > req->nr_buffers)) { 578 if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
575 NV_ERROR(dev, "reloc container bo index invalid\n"); 579 NV_ERROR(drm, "reloc container bo index invalid\n");
576 ret = -EINVAL; 580 ret = -EINVAL;
577 break; 581 break;
578 } 582 }
@@ -580,7 +584,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
580 584
581 if (unlikely(r->reloc_bo_offset + 4 > 585 if (unlikely(r->reloc_bo_offset + 4 >
582 nvbo->bo.mem.num_pages << PAGE_SHIFT)) { 586 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
583 NV_ERROR(dev, "reloc outside of bo\n"); 587 NV_ERROR(drm, "reloc outside of bo\n");
584 ret = -EINVAL; 588 ret = -EINVAL;
585 break; 589 break;
586 } 590 }
@@ -589,7 +593,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
589 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, 593 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
590 &nvbo->kmap); 594 &nvbo->kmap);
591 if (ret) { 595 if (ret) {
592 NV_ERROR(dev, "failed kmap for reloc\n"); 596 NV_ERROR(drm, "failed kmap for reloc\n");
593 break; 597 break;
594 } 598 }
595 nvbo->validate_mapped = true; 599 nvbo->validate_mapped = true;
@@ -614,7 +618,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
614 ret = ttm_bo_wait(&nvbo->bo, false, false, false); 618 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
615 spin_unlock(&nvbo->bo.bdev->fence_lock); 619 spin_unlock(&nvbo->bo.bdev->fence_lock);
616 if (ret) { 620 if (ret) {
617 NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret); 621 NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
618 break; 622 break;
619 } 623 }
620 624
@@ -629,62 +633,67 @@ int
629nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, 633nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
630 struct drm_file *file_priv) 634 struct drm_file *file_priv)
631{ 635{
632 struct drm_nouveau_private *dev_priv = dev->dev_private; 636 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
637 struct nouveau_abi16_chan *temp;
638 struct nouveau_drm *drm = nouveau_drm(dev);
633 struct drm_nouveau_gem_pushbuf *req = data; 639 struct drm_nouveau_gem_pushbuf *req = data;
634 struct drm_nouveau_gem_pushbuf_push *push; 640 struct drm_nouveau_gem_pushbuf_push *push;
635 struct drm_nouveau_gem_pushbuf_bo *bo; 641 struct drm_nouveau_gem_pushbuf_bo *bo;
636 struct nouveau_channel *chan; 642 struct nouveau_channel *chan = NULL;
637 struct validate_op op; 643 struct validate_op op;
638 struct nouveau_fence *fence = NULL; 644 struct nouveau_fence *fence = NULL;
639 int i, j, ret = 0, do_reloc = 0; 645 int i, j, ret = 0, do_reloc = 0;
640 646
641 chan = nouveau_channel_get(file_priv, req->channel); 647 if (unlikely(!abi16))
642 if (IS_ERR(chan)) 648 return -ENOMEM;
643 return PTR_ERR(chan); 649
650 list_for_each_entry(temp, &abi16->channels, head) {
651 if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
652 chan = temp->chan;
653 break;
654 }
655 }
644 656
645 req->vram_available = dev_priv->fb_aper_free; 657 if (!chan)
646 req->gart_available = dev_priv->gart_info.aper_free; 658 return nouveau_abi16_put(abi16, -ENOENT);
659
660 req->vram_available = drm->gem.vram_available;
661 req->gart_available = drm->gem.gart_available;
647 if (unlikely(req->nr_push == 0)) 662 if (unlikely(req->nr_push == 0))
648 goto out_next; 663 goto out_next;
649 664
650 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { 665 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
651 NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", 666 NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
652 req->nr_push, NOUVEAU_GEM_MAX_PUSH); 667 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
653 nouveau_channel_put(&chan); 668 return nouveau_abi16_put(abi16, -EINVAL);
654 return -EINVAL;
655 } 669 }
656 670
657 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { 671 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
658 NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", 672 NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
659 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); 673 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
660 nouveau_channel_put(&chan); 674 return nouveau_abi16_put(abi16, -EINVAL);
661 return -EINVAL;
662 } 675 }
663 676
664 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { 677 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
665 NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", 678 NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
666 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); 679 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
667 nouveau_channel_put(&chan); 680 return nouveau_abi16_put(abi16, -EINVAL);
668 return -EINVAL;
669 } 681 }
670 682
671 push = u_memcpya(req->push, req->nr_push, sizeof(*push)); 683 push = u_memcpya(req->push, req->nr_push, sizeof(*push));
672 if (IS_ERR(push)) { 684 if (IS_ERR(push))
673 nouveau_channel_put(&chan); 685 return nouveau_abi16_put(abi16, PTR_ERR(push));
674 return PTR_ERR(push);
675 }
676 686
677 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); 687 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
678 if (IS_ERR(bo)) { 688 if (IS_ERR(bo)) {
679 kfree(push); 689 kfree(push);
680 nouveau_channel_put(&chan); 690 return nouveau_abi16_put(abi16, PTR_ERR(bo));
681 return PTR_ERR(bo);
682 } 691 }
683 692
684 /* Ensure all push buffers are on validate list */ 693 /* Ensure all push buffers are on validate list */
685 for (i = 0; i < req->nr_push; i++) { 694 for (i = 0; i < req->nr_push; i++) {
686 if (push[i].bo_index >= req->nr_buffers) { 695 if (push[i].bo_index >= req->nr_buffers) {
687 NV_ERROR(dev, "push %d buffer not in list\n", i); 696 NV_ERROR(drm, "push %d buffer not in list\n", i);
688 ret = -EINVAL; 697 ret = -EINVAL;
689 goto out_prevalid; 698 goto out_prevalid;
690 } 699 }
@@ -695,7 +704,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
695 req->nr_buffers, &op, &do_reloc); 704 req->nr_buffers, &op, &do_reloc);
696 if (ret) { 705 if (ret) {
697 if (ret != -ERESTARTSYS) 706 if (ret != -ERESTARTSYS)
698 NV_ERROR(dev, "validate: %d\n", ret); 707 NV_ERROR(drm, "validate: %d\n", ret);
699 goto out_prevalid; 708 goto out_prevalid;
700 } 709 }
701 710
@@ -703,7 +712,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
703 if (do_reloc) { 712 if (do_reloc) {
704 ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo); 713 ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
705 if (ret) { 714 if (ret) {
706 NV_ERROR(dev, "reloc apply: %d\n", ret); 715 NV_ERROR(drm, "reloc apply: %d\n", ret);
707 goto out; 716 goto out;
708 } 717 }
709 } 718 }
@@ -711,7 +720,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
711 if (chan->dma.ib_max) { 720 if (chan->dma.ib_max) {
712 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); 721 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
713 if (ret) { 722 if (ret) {
714 NV_INFO(dev, "nv50cal_space: %d\n", ret); 723 NV_ERROR(drm, "nv50cal_space: %d\n", ret);
715 goto out; 724 goto out;
716 } 725 }
717 726
@@ -723,36 +732,33 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
723 push[i].length); 732 push[i].length);
724 } 733 }
725 } else 734 } else
726 if (dev_priv->chipset >= 0x25) { 735 if (nv_device(drm->device)->chipset >= 0x25) {
727 ret = RING_SPACE(chan, req->nr_push * 2); 736 ret = RING_SPACE(chan, req->nr_push * 2);
728 if (ret) { 737 if (ret) {
729 NV_ERROR(dev, "cal_space: %d\n", ret); 738 NV_ERROR(drm, "cal_space: %d\n", ret);
730 goto out; 739 goto out;
731 } 740 }
732 741
733 for (i = 0; i < req->nr_push; i++) { 742 for (i = 0; i < req->nr_push; i++) {
734 struct nouveau_bo *nvbo = (void *)(unsigned long) 743 struct nouveau_bo *nvbo = (void *)(unsigned long)
735 bo[push[i].bo_index].user_priv; 744 bo[push[i].bo_index].user_priv;
736 struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
737 745
738 OUT_RING(chan, ((mem->start << PAGE_SHIFT) + 746 OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
739 push[i].offset) | 2);
740 OUT_RING(chan, 0); 747 OUT_RING(chan, 0);
741 } 748 }
742 } else { 749 } else {
743 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); 750 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
744 if (ret) { 751 if (ret) {
745 NV_ERROR(dev, "jmp_space: %d\n", ret); 752 NV_ERROR(drm, "jmp_space: %d\n", ret);
746 goto out; 753 goto out;
747 } 754 }
748 755
749 for (i = 0; i < req->nr_push; i++) { 756 for (i = 0; i < req->nr_push; i++) {
750 struct nouveau_bo *nvbo = (void *)(unsigned long) 757 struct nouveau_bo *nvbo = (void *)(unsigned long)
751 bo[push[i].bo_index].user_priv; 758 bo[push[i].bo_index].user_priv;
752 struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
753 uint32_t cmd; 759 uint32_t cmd;
754 760
755 cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2); 761 cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
756 cmd |= 0x20000000; 762 cmd |= 0x20000000;
757 if (unlikely(cmd != req->suffix0)) { 763 if (unlikely(cmd != req->suffix0)) {
758 if (!nvbo->kmap.virtual) { 764 if (!nvbo->kmap.virtual) {
@@ -771,8 +777,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
771 push[i].length - 8) / 4, cmd); 777 push[i].length - 8) / 4, cmd);
772 } 778 }
773 779
774 OUT_RING(chan, ((mem->start << PAGE_SHIFT) + 780 OUT_RING(chan, 0x20000000 |
775 push[i].offset) | 0x20000000); 781 (nvbo->bo.offset + push[i].offset));
776 OUT_RING(chan, 0); 782 OUT_RING(chan, 0);
777 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) 783 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
778 OUT_RING(chan, 0); 784 OUT_RING(chan, 0);
@@ -781,7 +787,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
781 787
782 ret = nouveau_fence_new(chan, &fence); 788 ret = nouveau_fence_new(chan, &fence);
783 if (ret) { 789 if (ret) {
784 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); 790 NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
785 WIND_RING(chan); 791 WIND_RING(chan);
786 goto out; 792 goto out;
787 } 793 }
@@ -799,17 +805,16 @@ out_next:
799 req->suffix0 = 0x00000000; 805 req->suffix0 = 0x00000000;
800 req->suffix1 = 0x00000000; 806 req->suffix1 = 0x00000000;
801 } else 807 } else
802 if (dev_priv->chipset >= 0x25) { 808 if (nv_device(drm->device)->chipset >= 0x25) {
803 req->suffix0 = 0x00020000; 809 req->suffix0 = 0x00020000;
804 req->suffix1 = 0x00000000; 810 req->suffix1 = 0x00000000;
805 } else { 811 } else {
806 req->suffix0 = 0x20000000 | 812 req->suffix0 = 0x20000000 |
807 (chan->pushbuf_base + ((chan->dma.cur + 2) << 2)); 813 (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
808 req->suffix1 = 0x00000000; 814 req->suffix1 = 0x00000000;
809 } 815 }
810 816
811 nouveau_channel_put(&chan); 817 return nouveau_abi16_put(abi16, ret);
812 return ret;
813} 818}
814 819
815static inline uint32_t 820static inline uint32_t
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
new file mode 100644
index 000000000000..085ece91c395
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -0,0 +1,43 @@
1#ifndef __NOUVEAU_GEM_H__
2#define __NOUVEAU_GEM_H__
3
4#include "drmP.h"
5
6#include <nouveau_drm.h>
7#include "nouveau_bo.h"
8
9#define nouveau_bo_tile_layout(nvbo) \
10 ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
11
12static inline struct nouveau_bo *
13nouveau_gem_object(struct drm_gem_object *gem)
14{
15 return gem ? gem->driver_private : NULL;
16}
17
18/* nouveau_gem.c */
19extern int nouveau_gem_new(struct drm_device *, int size, int align,
20 uint32_t domain, uint32_t tile_mode,
21 uint32_t tile_flags, struct nouveau_bo **);
22extern int nouveau_gem_object_new(struct drm_gem_object *);
23extern void nouveau_gem_object_del(struct drm_gem_object *);
24extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
25extern void nouveau_gem_object_close(struct drm_gem_object *,
26 struct drm_file *);
27extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
28 struct drm_file *);
29extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
30 struct drm_file *);
31extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
32 struct drm_file *);
33extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
34 struct drm_file *);
35extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
36 struct drm_file *);
37
38extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
39 struct drm_gem_object *obj, int flags);
40extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
41 struct dma_buf *dma_buf);
42
43#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
deleted file mode 100644
index 82c19e82ff02..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.c
+++ /dev/null
@@ -1,400 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_i2c.h"
28#include "nouveau_gpio.h"
29
30static u8 *
31dcb_gpio_table(struct drm_device *dev)
32{
33 u8 *dcb = dcb_table(dev);
34 if (dcb) {
35 if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
36 return ROMPTR(dev, dcb[0x0a]);
37 if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
38 return ROMPTR(dev, dcb[-15]);
39 }
40 return NULL;
41}
42
43static u8 *
44dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
45{
46 u8 *table = dcb_gpio_table(dev);
47 if (table) {
48 *version = table[0];
49 if (*version < 0x30 && ent < table[2])
50 return table + 3 + (ent * table[1]);
51 else if (ent < table[2])
52 return table + table[1] + (ent * table[3]);
53 }
54 return NULL;
55}
56
57int
58nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
59{
60 struct drm_nouveau_private *dev_priv = dev->dev_private;
61 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
62
63 return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
64}
65
66int
67nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
68{
69 struct drm_nouveau_private *dev_priv = dev->dev_private;
70 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
71
72 return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
73}
74
75int
76nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
77 struct gpio_func *gpio)
78{
79 u8 *table, *entry, version;
80 int i = -1;
81
82 if (line == 0xff && func == 0xff)
83 return -EINVAL;
84
85 while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
86 if (version < 0x40) {
87 u16 data = ROM16(entry[0]);
88 *gpio = (struct gpio_func) {
89 .line = (data & 0x001f) >> 0,
90 .func = (data & 0x07e0) >> 5,
91 .log[0] = (data & 0x1800) >> 11,
92 .log[1] = (data & 0x6000) >> 13,
93 };
94 } else
95 if (version < 0x41) {
96 *gpio = (struct gpio_func) {
97 .line = entry[0] & 0x1f,
98 .func = entry[1],
99 .log[0] = (entry[3] & 0x18) >> 3,
100 .log[1] = (entry[3] & 0x60) >> 5,
101 };
102 } else {
103 *gpio = (struct gpio_func) {
104 .line = entry[0] & 0x3f,
105 .func = entry[1],
106 .log[0] = (entry[4] & 0x30) >> 4,
107 .log[1] = (entry[4] & 0xc0) >> 6,
108 };
109 }
110
111 if ((line == 0xff || line == gpio->line) &&
112 (func == 0xff || func == gpio->func))
113 return 0;
114 }
115
116 /* DCB 2.2, fixed TVDAC GPIO data */
117 if ((table = dcb_table(dev)) && table[0] >= 0x22) {
118 if (func == DCB_GPIO_TVDAC0) {
119 *gpio = (struct gpio_func) {
120 .func = DCB_GPIO_TVDAC0,
121 .line = table[-4] >> 4,
122 .log[0] = !!(table[-5] & 2),
123 .log[1] = !(table[-5] & 2),
124 };
125 return 0;
126 }
127 }
128
129 /* Apple iMac G4 NV18 */
130 if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
131 if (func == DCB_GPIO_TVDAC0) {
132 *gpio = (struct gpio_func) {
133 .func = DCB_GPIO_TVDAC0,
134 .line = 4,
135 .log[0] = 0,
136 .log[1] = 1,
137 };
138 return 0;
139 }
140 }
141
142 return -EINVAL;
143}
144
145int
146nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
147{
148 struct gpio_func gpio;
149 int ret;
150
151 ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
152 if (ret == 0) {
153 int dir = !!(gpio.log[state] & 0x02);
154 int out = !!(gpio.log[state] & 0x01);
155 ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
156 }
157
158 return ret;
159}
160
161int
162nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
163{
164 struct gpio_func gpio;
165 int ret;
166
167 ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
168 if (ret == 0) {
169 ret = nouveau_gpio_sense(dev, idx, gpio.line);
170 if (ret >= 0)
171 ret = (ret == (gpio.log[1] & 1));
172 }
173
174 return ret;
175}
176
177int
178nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
179{
180 struct drm_nouveau_private *dev_priv = dev->dev_private;
181 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
182 struct gpio_func gpio;
183 int ret;
184
185 ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
186 if (ret == 0) {
187 if (idx == 0 && pgpio->irq_enable)
188 pgpio->irq_enable(dev, gpio.line, on);
189 else
190 ret = -ENODEV;
191 }
192
193 return ret;
194}
195
196struct gpio_isr {
197 struct drm_device *dev;
198 struct list_head head;
199 struct work_struct work;
200 int idx;
201 struct gpio_func func;
202 void (*handler)(void *, int);
203 void *data;
204 bool inhibit;
205};
206
207static void
208nouveau_gpio_isr_bh(struct work_struct *work)
209{
210 struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
211 struct drm_device *dev = isr->dev;
212 struct drm_nouveau_private *dev_priv = dev->dev_private;
213 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
214 unsigned long flags;
215 int state;
216
217 state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
218 if (state >= 0)
219 isr->handler(isr->data, state);
220
221 spin_lock_irqsave(&pgpio->lock, flags);
222 isr->inhibit = false;
223 spin_unlock_irqrestore(&pgpio->lock, flags);
224}
225
226void
227nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
228{
229 struct drm_nouveau_private *dev_priv = dev->dev_private;
230 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
231 struct gpio_isr *isr;
232
233 if (idx != 0)
234 return;
235
236 spin_lock(&pgpio->lock);
237 list_for_each_entry(isr, &pgpio->isr, head) {
238 if (line_mask & (1 << isr->func.line)) {
239 if (isr->inhibit)
240 continue;
241 isr->inhibit = true;
242 schedule_work(&isr->work);
243 }
244 }
245 spin_unlock(&pgpio->lock);
246}
247
248int
249nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
250 void (*handler)(void *, int), void *data)
251{
252 struct drm_nouveau_private *dev_priv = dev->dev_private;
253 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
254 struct gpio_isr *isr;
255 unsigned long flags;
256 int ret;
257
258 isr = kzalloc(sizeof(*isr), GFP_KERNEL);
259 if (!isr)
260 return -ENOMEM;
261
262 ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
263 if (ret) {
264 kfree(isr);
265 return ret;
266 }
267
268 INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
269 isr->dev = dev;
270 isr->handler = handler;
271 isr->data = data;
272 isr->idx = idx;
273
274 spin_lock_irqsave(&pgpio->lock, flags);
275 list_add(&isr->head, &pgpio->isr);
276 spin_unlock_irqrestore(&pgpio->lock, flags);
277 return 0;
278}
279
280void
281nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
282 void (*handler)(void *, int), void *data)
283{
284 struct drm_nouveau_private *dev_priv = dev->dev_private;
285 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
286 struct gpio_isr *isr, *tmp;
287 struct gpio_func func;
288 unsigned long flags;
289 LIST_HEAD(tofree);
290 int ret;
291
292 ret = nouveau_gpio_find(dev, idx, tag, line, &func);
293 if (ret == 0) {
294 spin_lock_irqsave(&pgpio->lock, flags);
295 list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
296 if (memcmp(&isr->func, &func, sizeof(func)) ||
297 isr->idx != idx ||
298 isr->handler != handler || isr->data != data)
299 continue;
300 list_move(&isr->head, &tofree);
301 }
302 spin_unlock_irqrestore(&pgpio->lock, flags);
303
304 list_for_each_entry_safe(isr, tmp, &tofree, head) {
305 flush_work_sync(&isr->work);
306 kfree(isr);
307 }
308 }
309}
310
311int
312nouveau_gpio_create(struct drm_device *dev)
313{
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
316
317 INIT_LIST_HEAD(&pgpio->isr);
318 spin_lock_init(&pgpio->lock);
319
320 return nouveau_gpio_init(dev);
321}
322
323void
324nouveau_gpio_destroy(struct drm_device *dev)
325{
326 struct drm_nouveau_private *dev_priv = dev->dev_private;
327 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
328
329 nouveau_gpio_fini(dev);
330 BUG_ON(!list_empty(&pgpio->isr));
331}
332
333int
334nouveau_gpio_init(struct drm_device *dev)
335{
336 struct drm_nouveau_private *dev_priv = dev->dev_private;
337 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
338 int ret = 0;
339
340 if (pgpio->init)
341 ret = pgpio->init(dev);
342
343 return ret;
344}
345
346void
347nouveau_gpio_fini(struct drm_device *dev)
348{
349 struct drm_nouveau_private *dev_priv = dev->dev_private;
350 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
351
352 if (pgpio->fini)
353 pgpio->fini(dev);
354}
355
356void
357nouveau_gpio_reset(struct drm_device *dev)
358{
359 struct drm_nouveau_private *dev_priv = dev->dev_private;
360 u8 *entry, version;
361 int ent = -1;
362
363 while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
364 u8 func = 0xff, line, defs, unk0, unk1;
365 if (version >= 0x41) {
366 defs = !!(entry[0] & 0x80);
367 line = entry[0] & 0x3f;
368 func = entry[1];
369 unk0 = entry[2];
370 unk1 = entry[3] & 0x1f;
371 } else
372 if (version >= 0x40) {
373 line = entry[0] & 0x1f;
374 func = entry[1];
375 defs = !!(entry[3] & 0x01);
376 unk0 = !!(entry[3] & 0x02);
377 unk1 = !!(entry[3] & 0x04);
378 } else {
379 break;
380 }
381
382 if (func == 0xff)
383 continue;
384
385 nouveau_gpio_func_set(dev, func, defs);
386
387 if (dev_priv->card_type >= NV_D0) {
388 nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
389 if (unk1--)
390 nv_mask(dev, 0x00d740 + (unk1 * 4), 0xff, line);
391 } else
392 if (dev_priv->card_type >= NV_50) {
393 static const u32 regs[] = { 0xe100, 0xe28c };
394 u32 val = (unk1 << 16) | unk0;
395 u32 reg = regs[line >> 4]; line &= 0x0f;
396
397 nv_mask(dev, reg, 0x00010001 << line, val << line);
398 }
399 }
400}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
deleted file mode 100644
index 64c5cb077ace..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NOUVEAU_GPIO_H__
24#define __NOUVEAU_GPIO_H__
25
26struct gpio_func {
27 u8 func;
28 u8 line;
29 u8 log[2];
30};
31
32/* nouveau_gpio.c */
33int nouveau_gpio_create(struct drm_device *);
34void nouveau_gpio_destroy(struct drm_device *);
35int nouveau_gpio_init(struct drm_device *);
36void nouveau_gpio_fini(struct drm_device *);
37void nouveau_gpio_reset(struct drm_device *);
38int nouveau_gpio_drive(struct drm_device *, int idx, int line,
39 int dir, int out);
40int nouveau_gpio_sense(struct drm_device *, int idx, int line);
41int nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
42 struct gpio_func *);
43int nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
44int nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
45int nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
46void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
47int nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
48 void (*)(void *, int state), void *data);
49void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
50 void (*)(void *, int state), void *data);
51
52static inline bool
53nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
54{
55 struct gpio_func func;
56 return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
57}
58
59static inline int
60nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
61{
62 return nouveau_gpio_set(dev, 0, tag, 0xff, state);
63}
64
65static inline int
66nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
67{
68 return nouveau_gpio_get(dev, 0, tag, 0xff);
69}
70
71#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
deleted file mode 100644
index bd79fedb7054..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
+++ /dev/null
@@ -1,808 +0,0 @@
1/*
2 * Copyright (C) 2006 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28/*
29 * Authors:
30 * Ben Skeggs <darktama@iinet.net.au>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "nouveau_drv.h"
36#include "nouveau_drm.h"
37#include "nouveau_fifo.h"
38#include "nouveau_ramht.h"
39#include "nouveau_software.h"
40#include "nouveau_vm.h"
41
42struct nouveau_gpuobj_method {
43 struct list_head head;
44 u32 mthd;
45 int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
46};
47
48struct nouveau_gpuobj_class {
49 struct list_head head;
50 struct list_head methods;
51 u32 id;
52 u32 engine;
53};
54
55int
56nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
57{
58 struct drm_nouveau_private *dev_priv = dev->dev_private;
59 struct nouveau_gpuobj_class *oc;
60
61 oc = kzalloc(sizeof(*oc), GFP_KERNEL);
62 if (!oc)
63 return -ENOMEM;
64
65 INIT_LIST_HEAD(&oc->methods);
66 oc->id = class;
67 oc->engine = engine;
68 list_add(&oc->head, &dev_priv->classes);
69 return 0;
70}
71
72int
73nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
74 int (*exec)(struct nouveau_channel *, u32, u32, u32))
75{
76 struct drm_nouveau_private *dev_priv = dev->dev_private;
77 struct nouveau_gpuobj_method *om;
78 struct nouveau_gpuobj_class *oc;
79
80 list_for_each_entry(oc, &dev_priv->classes, head) {
81 if (oc->id == class)
82 goto found;
83 }
84
85 return -EINVAL;
86
87found:
88 om = kzalloc(sizeof(*om), GFP_KERNEL);
89 if (!om)
90 return -ENOMEM;
91
92 om->mthd = mthd;
93 om->exec = exec;
94 list_add(&om->head, &oc->methods);
95 return 0;
96}
97
98int
99nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
100 u32 class, u32 mthd, u32 data)
101{
102 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
103 struct nouveau_gpuobj_method *om;
104 struct nouveau_gpuobj_class *oc;
105
106 list_for_each_entry(oc, &dev_priv->classes, head) {
107 if (oc->id != class)
108 continue;
109
110 list_for_each_entry(om, &oc->methods, head) {
111 if (om->mthd == mthd)
112 return om->exec(chan, class, mthd, data);
113 }
114 }
115
116 return -ENOENT;
117}
118
119int
120nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
121 u32 class, u32 mthd, u32 data)
122{
123 struct drm_nouveau_private *dev_priv = dev->dev_private;
124 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
125 struct nouveau_channel *chan = NULL;
126 unsigned long flags;
127 int ret = -EINVAL;
128
129 spin_lock_irqsave(&dev_priv->channels.lock, flags);
130 if (chid >= 0 && chid < pfifo->channels)
131 chan = dev_priv->channels.ptr[chid];
132 if (chan)
133 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
134 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
135 return ret;
136}
137
138int
139nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
140 uint32_t size, int align, uint32_t flags,
141 struct nouveau_gpuobj **gpuobj_ret)
142{
143 struct drm_nouveau_private *dev_priv = dev->dev_private;
144 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
145 struct nouveau_gpuobj *gpuobj;
146 struct drm_mm_node *ramin = NULL;
147 int ret, i;
148
149 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
150 chan ? chan->id : -1, size, align, flags);
151
152 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
153 if (!gpuobj)
154 return -ENOMEM;
155 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
156 gpuobj->dev = dev;
157 gpuobj->flags = flags;
158 kref_init(&gpuobj->refcount);
159 gpuobj->size = size;
160
161 spin_lock(&dev_priv->ramin_lock);
162 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
163 spin_unlock(&dev_priv->ramin_lock);
164
165 if (!(flags & NVOBJ_FLAG_VM) && chan) {
166 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
167 if (ramin)
168 ramin = drm_mm_get_block(ramin, size, align);
169 if (!ramin) {
170 nouveau_gpuobj_ref(NULL, &gpuobj);
171 return -ENOMEM;
172 }
173
174 gpuobj->pinst = chan->ramin->pinst;
175 if (gpuobj->pinst != ~0)
176 gpuobj->pinst += ramin->start;
177
178 gpuobj->cinst = ramin->start;
179 gpuobj->vinst = ramin->start + chan->ramin->vinst;
180 gpuobj->node = ramin;
181 } else {
182 ret = instmem->get(gpuobj, chan, size, align);
183 if (ret) {
184 nouveau_gpuobj_ref(NULL, &gpuobj);
185 return ret;
186 }
187
188 ret = -ENOSYS;
189 if (!(flags & NVOBJ_FLAG_DONT_MAP))
190 ret = instmem->map(gpuobj);
191 if (ret)
192 gpuobj->pinst = ~0;
193
194 gpuobj->cinst = NVOBJ_CINST_GLOBAL;
195 }
196
197 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
198 for (i = 0; i < gpuobj->size; i += 4)
199 nv_wo32(gpuobj, i, 0);
200 instmem->flush(dev);
201 }
202
203
204 *gpuobj_ret = gpuobj;
205 return 0;
206}
207
208int
209nouveau_gpuobj_init(struct drm_device *dev)
210{
211 struct drm_nouveau_private *dev_priv = dev->dev_private;
212
213 NV_DEBUG(dev, "\n");
214
215 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
216 INIT_LIST_HEAD(&dev_priv->classes);
217 spin_lock_init(&dev_priv->ramin_lock);
218 dev_priv->ramin_base = ~0;
219
220 return 0;
221}
222
223void
224nouveau_gpuobj_takedown(struct drm_device *dev)
225{
226 struct drm_nouveau_private *dev_priv = dev->dev_private;
227 struct nouveau_gpuobj_method *om, *tm;
228 struct nouveau_gpuobj_class *oc, *tc;
229
230 NV_DEBUG(dev, "\n");
231
232 list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
233 list_for_each_entry_safe(om, tm, &oc->methods, head) {
234 list_del(&om->head);
235 kfree(om);
236 }
237 list_del(&oc->head);
238 kfree(oc);
239 }
240
241 WARN_ON(!list_empty(&dev_priv->gpuobj_list));
242}
243
244
245static void
246nouveau_gpuobj_del(struct kref *ref)
247{
248 struct nouveau_gpuobj *gpuobj =
249 container_of(ref, struct nouveau_gpuobj, refcount);
250 struct drm_device *dev = gpuobj->dev;
251 struct drm_nouveau_private *dev_priv = dev->dev_private;
252 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
253 int i;
254
255 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
256
257 if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
258 for (i = 0; i < gpuobj->size; i += 4)
259 nv_wo32(gpuobj, i, 0);
260 instmem->flush(dev);
261 }
262
263 if (gpuobj->dtor)
264 gpuobj->dtor(dev, gpuobj);
265
266 if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
267 if (gpuobj->node) {
268 instmem->unmap(gpuobj);
269 instmem->put(gpuobj);
270 }
271 } else {
272 if (gpuobj->node) {
273 spin_lock(&dev_priv->ramin_lock);
274 drm_mm_put_block(gpuobj->node);
275 spin_unlock(&dev_priv->ramin_lock);
276 }
277 }
278
279 spin_lock(&dev_priv->ramin_lock);
280 list_del(&gpuobj->list);
281 spin_unlock(&dev_priv->ramin_lock);
282
283 kfree(gpuobj);
284}
285
286void
287nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
288{
289 if (ref)
290 kref_get(&ref->refcount);
291
292 if (*ptr)
293 kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);
294
295 *ptr = ref;
296}
297
298int
299nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
300 u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
301{
302 struct drm_nouveau_private *dev_priv = dev->dev_private;
303 struct nouveau_gpuobj *gpuobj = NULL;
304 int i;
305
306 NV_DEBUG(dev,
307 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
308 pinst, vinst, size, flags);
309
310 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
311 if (!gpuobj)
312 return -ENOMEM;
313 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
314 gpuobj->dev = dev;
315 gpuobj->flags = flags;
316 kref_init(&gpuobj->refcount);
317 gpuobj->size = size;
318 gpuobj->pinst = pinst;
319 gpuobj->cinst = NVOBJ_CINST_GLOBAL;
320 gpuobj->vinst = vinst;
321
322 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
323 for (i = 0; i < gpuobj->size; i += 4)
324 nv_wo32(gpuobj, i, 0);
325 dev_priv->engine.instmem.flush(dev);
326 }
327
328 spin_lock(&dev_priv->ramin_lock);
329 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
330 spin_unlock(&dev_priv->ramin_lock);
331 *pgpuobj = gpuobj;
332 return 0;
333}
334
335void
336nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
337 u64 base, u64 size, int target, int access,
338 u32 type, u32 comp)
339{
340 struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
341 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
342 u32 flags0;
343
344 flags0 = (comp << 29) | (type << 22) | class;
345 flags0 |= 0x00100000;
346
347 switch (access) {
348 case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
349 case NV_MEM_ACCESS_RW:
350 case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
351 default:
352 break;
353 }
354
355 switch (target) {
356 case NV_MEM_TARGET_VRAM:
357 flags0 |= 0x00010000;
358 break;
359 case NV_MEM_TARGET_PCI:
360 flags0 |= 0x00020000;
361 break;
362 case NV_MEM_TARGET_PCI_NOSNOOP:
363 flags0 |= 0x00030000;
364 break;
365 case NV_MEM_TARGET_GART:
366 base += dev_priv->gart_info.aper_base;
367 default:
368 flags0 &= ~0x00100000;
369 break;
370 }
371
372 /* convert to base + limit */
373 size = (base + size) - 1;
374
375 nv_wo32(obj, offset + 0x00, flags0);
376 nv_wo32(obj, offset + 0x04, lower_32_bits(size));
377 nv_wo32(obj, offset + 0x08, lower_32_bits(base));
378 nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
379 upper_32_bits(base));
380 nv_wo32(obj, offset + 0x10, 0x00000000);
381 nv_wo32(obj, offset + 0x14, 0x00000000);
382
383 pinstmem->flush(obj->dev);
384}
385
386int
387nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
388 int target, int access, u32 type, u32 comp,
389 struct nouveau_gpuobj **pobj)
390{
391 struct drm_device *dev = chan->dev;
392 int ret;
393
394 ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
395 if (ret)
396 return ret;
397
398 nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
399 access, type, comp);
400 return 0;
401}
402
403int
404nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
405 u64 size, int access, int target,
406 struct nouveau_gpuobj **pobj)
407{
408 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
409 struct drm_device *dev = chan->dev;
410 struct nouveau_gpuobj *obj;
411 u32 flags0, flags2;
412 int ret;
413
414 if (dev_priv->card_type >= NV_50) {
415 u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
416 u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
417
418 return nv50_gpuobj_dma_new(chan, class, base, size,
419 target, access, type, comp, pobj);
420 }
421
422 if (target == NV_MEM_TARGET_GART) {
423 struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
424
425 if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
426 if (base == 0) {
427 nouveau_gpuobj_ref(gart, pobj);
428 return 0;
429 }
430
431 base = nouveau_sgdma_get_physical(dev, base);
432 target = NV_MEM_TARGET_PCI;
433 } else {
434 base += dev_priv->gart_info.aper_base;
435 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
436 target = NV_MEM_TARGET_PCI_NOSNOOP;
437 else
438 target = NV_MEM_TARGET_PCI;
439 }
440 }
441
442 flags0 = class;
443 flags0 |= 0x00003000; /* PT present, PT linear */
444 flags2 = 0;
445
446 switch (target) {
447 case NV_MEM_TARGET_PCI:
448 flags0 |= 0x00020000;
449 break;
450 case NV_MEM_TARGET_PCI_NOSNOOP:
451 flags0 |= 0x00030000;
452 break;
453 default:
454 break;
455 }
456
457 switch (access) {
458 case NV_MEM_ACCESS_RO:
459 flags0 |= 0x00004000;
460 break;
461 case NV_MEM_ACCESS_WO:
462 flags0 |= 0x00008000;
463 default:
464 flags2 |= 0x00000002;
465 break;
466 }
467
468 flags0 |= (base & 0x00000fff) << 20;
469 flags2 |= (base & 0xfffff000);
470
471 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
472 if (ret)
473 return ret;
474
475 nv_wo32(obj, 0x00, flags0);
476 nv_wo32(obj, 0x04, size - 1);
477 nv_wo32(obj, 0x08, flags2);
478 nv_wo32(obj, 0x0c, flags2);
479
480 obj->engine = NVOBJ_ENGINE_SW;
481 obj->class = class;
482 *pobj = obj;
483 return 0;
484}
485
486int
487nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
488{
489 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
490 struct drm_device *dev = chan->dev;
491 struct nouveau_gpuobj_class *oc;
492 int ret;
493
494 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
495
496 list_for_each_entry(oc, &dev_priv->classes, head) {
497 struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];
498
499 if (oc->id != class)
500 continue;
501
502 if (!chan->engctx[oc->engine]) {
503 ret = eng->context_new(chan, oc->engine);
504 if (ret)
505 return ret;
506 }
507
508 return eng->object_new(chan, oc->engine, handle, class);
509 }
510
511 return -EINVAL;
512}
513
514static int
515nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
516{
517 struct drm_device *dev = chan->dev;
518 struct drm_nouveau_private *dev_priv = dev->dev_private;
519 uint32_t size;
520 uint32_t base;
521 int ret;
522
523 NV_DEBUG(dev, "ch%d\n", chan->id);
524
525 /* Base amount for object storage (4KiB enough?) */
526 size = 0x2000;
527 base = 0;
528
529 if (dev_priv->card_type == NV_50) {
530 /* Various fixed table thingos */
531 size += 0x1400; /* mostly unknown stuff */
532 size += 0x4000; /* vm pd */
533 base = 0x6000;
534 /* RAMHT, not sure about setting size yet, 32KiB to be safe */
535 size += 0x8000;
536 /* RAMFC */
537 size += 0x1000;
538 }
539
540 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
541 if (ret) {
542 NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
543 return ret;
544 }
545
546 ret = drm_mm_init(&chan->ramin_heap, base, size - base);
547 if (ret) {
548 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
549 nouveau_gpuobj_ref(NULL, &chan->ramin);
550 return ret;
551 }
552
553 return 0;
554}
555
556static int
557nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
558{
559 struct drm_device *dev = chan->dev;
560 struct nouveau_gpuobj *pgd = NULL;
561 struct nouveau_vm_pgd *vpgd;
562 int ret;
563
564 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
565 if (ret)
566 return ret;
567
568 /* create page directory for this vm if none currently exists,
569 * will be destroyed automagically when last reference to the
570 * vm is removed
571 */
572 if (list_empty(&vm->pgd_list)) {
573 ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
574 if (ret)
575 return ret;
576 }
577 nouveau_vm_ref(vm, &chan->vm, pgd);
578 nouveau_gpuobj_ref(NULL, &pgd);
579
580 /* point channel at vm's page directory */
581 vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
582 nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
583 nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
584 nv_wo32(chan->ramin, 0x0208, 0xffffffff);
585 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
586
587 return 0;
588}
589
590int
591nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
592 uint32_t vram_h, uint32_t tt_h)
593{
594 struct drm_device *dev = chan->dev;
595 struct drm_nouveau_private *dev_priv = dev->dev_private;
596 struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
597 struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
598 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
599 int ret;
600
601 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
602 if (dev_priv->card_type >= NV_C0)
603 return nvc0_gpuobj_channel_init(chan, vm);
604
605 /* Allocate a chunk of memory for per-channel object storage */
606 ret = nouveau_gpuobj_channel_init_pramin(chan);
607 if (ret) {
608 NV_ERROR(dev, "init pramin\n");
609 return ret;
610 }
611
612 /* NV50 VM
613 * - Allocate per-channel page-directory
614 * - Link with shared channel VM
615 */
616 if (vm) {
617 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
618 u64 vm_vinst = chan->ramin->vinst + pgd_offs;
619 u32 vm_pinst = chan->ramin->pinst;
620
621 if (vm_pinst != ~0)
622 vm_pinst += pgd_offs;
623
624 ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
625 0, &chan->vm_pd);
626 if (ret)
627 return ret;
628
629 nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
630 }
631
632 /* RAMHT */
633 if (dev_priv->card_type < NV_50) {
634 nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
635 } else {
636 struct nouveau_gpuobj *ramht = NULL;
637
638 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
639 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
640 if (ret)
641 return ret;
642
643 ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
644 nouveau_gpuobj_ref(NULL, &ramht);
645 if (ret)
646 return ret;
647 }
648
649 /* VRAM ctxdma */
650 if (dev_priv->card_type >= NV_50) {
651 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
652 0, (1ULL << 40), NV_MEM_ACCESS_RW,
653 NV_MEM_TARGET_VM, &vram);
654 if (ret) {
655 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
656 return ret;
657 }
658 } else {
659 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
660 0, dev_priv->fb_available_size,
661 NV_MEM_ACCESS_RW,
662 NV_MEM_TARGET_VRAM, &vram);
663 if (ret) {
664 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
665 return ret;
666 }
667 }
668
669 ret = nouveau_ramht_insert(chan, vram_h, vram);
670 nouveau_gpuobj_ref(NULL, &vram);
671 if (ret) {
672 NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
673 return ret;
674 }
675
676 /* TT memory ctxdma */
677 if (dev_priv->card_type >= NV_50) {
678 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
679 0, (1ULL << 40), NV_MEM_ACCESS_RW,
680 NV_MEM_TARGET_VM, &tt);
681 } else {
682 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
683 0, dev_priv->gart_info.aper_size,
684 NV_MEM_ACCESS_RW,
685 NV_MEM_TARGET_GART, &tt);
686 }
687
688 if (ret) {
689 NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
690 return ret;
691 }
692
693 ret = nouveau_ramht_insert(chan, tt_h, tt);
694 nouveau_gpuobj_ref(NULL, &tt);
695 if (ret) {
696 NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
697 return ret;
698 }
699
700 return 0;
701}
702
703void
704nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
705{
706 NV_DEBUG(chan->dev, "ch%d\n", chan->id);
707
708 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
709 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
710
711 if (drm_mm_initialized(&chan->ramin_heap))
712 drm_mm_takedown(&chan->ramin_heap);
713 nouveau_gpuobj_ref(NULL, &chan->ramin);
714}
715
716int
717nouveau_gpuobj_suspend(struct drm_device *dev)
718{
719 struct drm_nouveau_private *dev_priv = dev->dev_private;
720 struct nouveau_gpuobj *gpuobj;
721 int i;
722
723 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
724 if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
725 continue;
726
727 gpuobj->suspend = vmalloc(gpuobj->size);
728 if (!gpuobj->suspend) {
729 nouveau_gpuobj_resume(dev);
730 return -ENOMEM;
731 }
732
733 for (i = 0; i < gpuobj->size; i += 4)
734 gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
735 }
736
737 return 0;
738}
739
740void
741nouveau_gpuobj_resume(struct drm_device *dev)
742{
743 struct drm_nouveau_private *dev_priv = dev->dev_private;
744 struct nouveau_gpuobj *gpuobj;
745 int i;
746
747 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
748 if (!gpuobj->suspend)
749 continue;
750
751 for (i = 0; i < gpuobj->size; i += 4)
752 nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
753
754 vfree(gpuobj->suspend);
755 gpuobj->suspend = NULL;
756 }
757
758 dev_priv->engine.instmem.flush(dev);
759}
760
761u32
762nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
763{
764 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
765 struct drm_device *dev = gpuobj->dev;
766 unsigned long flags;
767
768 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
769 u64 ptr = gpuobj->vinst + offset;
770 u32 base = ptr >> 16;
771 u32 val;
772
773 spin_lock_irqsave(&dev_priv->vm_lock, flags);
774 if (dev_priv->ramin_base != base) {
775 dev_priv->ramin_base = base;
776 nv_wr32(dev, 0x001700, dev_priv->ramin_base);
777 }
778 val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
779 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
780 return val;
781 }
782
783 return nv_ri32(dev, gpuobj->pinst + offset);
784}
785
786void
787nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
788{
789 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
790 struct drm_device *dev = gpuobj->dev;
791 unsigned long flags;
792
793 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
794 u64 ptr = gpuobj->vinst + offset;
795 u32 base = ptr >> 16;
796
797 spin_lock_irqsave(&dev_priv->vm_lock, flags);
798 if (dev_priv->ramin_base != base) {
799 dev_priv->ramin_base = base;
800 nv_wr32(dev, 0x001700, dev_priv->ramin_base);
801 }
802 nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
803 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
804 return;
805 }
806
807 nv_wi32(dev, gpuobj->pinst + offset, val);
808}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
index c3de36384522..ea712b5762e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -23,7 +23,7 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drm.h"
27#include "nouveau_connector.h" 27#include "nouveau_connector.h"
28#include "nouveau_encoder.h" 28#include "nouveau_encoder.h"
29#include "nouveau_crtc.h" 29#include "nouveau_crtc.h"
@@ -31,10 +31,10 @@
31static bool 31static bool
32hdmi_sor(struct drm_encoder *encoder) 32hdmi_sor(struct drm_encoder *encoder)
33{ 33{
34 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; 34 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
35 if (dev_priv->chipset < 0xa3 || 35 if (nv_device(drm->device)->chipset < 0xa3 ||
36 dev_priv->chipset == 0xaa || 36 nv_device(drm->device)->chipset == 0xaa ||
37 dev_priv->chipset == 0xac) 37 nv_device(drm->device)->chipset == 0xac)
38 return false; 38 return false;
39 return true; 39 return true;
40} 40}
@@ -52,13 +52,15 @@ hdmi_base(struct drm_encoder *encoder)
52static void 52static void
53hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val) 53hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
54{ 54{
55 nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val); 55 struct nouveau_device *device = nouveau_dev(encoder->dev);
56 nv_wr32(device, hdmi_base(encoder) + reg, val);
56} 57}
57 58
58static u32 59static u32
59hdmi_rd32(struct drm_encoder *encoder, u32 reg) 60hdmi_rd32(struct drm_encoder *encoder, u32 reg)
60{ 61{
61 return nv_rd32(encoder->dev, hdmi_base(encoder) + reg); 62 struct nouveau_device *device = nouveau_dev(encoder->dev);
63 return nv_rd32(device, hdmi_base(encoder) + reg);
62} 64}
63 65
64static u32 66static u32
@@ -73,12 +75,11 @@ static void
73nouveau_audio_disconnect(struct drm_encoder *encoder) 75nouveau_audio_disconnect(struct drm_encoder *encoder)
74{ 76{
75 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 77 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
76 struct drm_device *dev = encoder->dev; 78 struct nouveau_device *device = nouveau_dev(encoder->dev);
77 u32 or = nv_encoder->or * 0x800; 79 u32 or = nv_encoder->or * 0x800;
78 80
79 if (hdmi_sor(encoder)) { 81 if (hdmi_sor(encoder))
80 nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000); 82 nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
81 }
82} 83}
83 84
84static void 85static void
@@ -86,8 +87,8 @@ nouveau_audio_mode_set(struct drm_encoder *encoder,
86 struct drm_display_mode *mode) 87 struct drm_display_mode *mode)
87{ 88{
88 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 89 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
90 struct nouveau_device *device = nouveau_dev(encoder->dev);
89 struct nouveau_connector *nv_connector; 91 struct nouveau_connector *nv_connector;
90 struct drm_device *dev = encoder->dev;
91 u32 or = nv_encoder->or * 0x800; 92 u32 or = nv_encoder->or * 0x800;
92 int i; 93 int i;
93 94
@@ -98,16 +99,16 @@ nouveau_audio_mode_set(struct drm_encoder *encoder,
98 } 99 }
99 100
100 if (hdmi_sor(encoder)) { 101 if (hdmi_sor(encoder)) {
101 nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001); 102 nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
102 103
103 drm_edid_to_eld(&nv_connector->base, nv_connector->edid); 104 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
104 if (nv_connector->base.eld[0]) { 105 if (nv_connector->base.eld[0]) {
105 u8 *eld = nv_connector->base.eld; 106 u8 *eld = nv_connector->base.eld;
106 for (i = 0; i < eld[2] * 4; i++) 107 for (i = 0; i < eld[2] * 4; i++)
107 nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]); 108 nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
108 for (i = eld[2] * 4; i < 0x60; i++) 109 for (i = eld[2] * 4; i < 0x60; i++)
109 nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00); 110 nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
110 nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002); 111 nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
111 } 112 }
112 } 113 }
113} 114}
@@ -219,9 +220,9 @@ void
219nouveau_hdmi_mode_set(struct drm_encoder *encoder, 220nouveau_hdmi_mode_set(struct drm_encoder *encoder,
220 struct drm_display_mode *mode) 221 struct drm_display_mode *mode)
221{ 222{
223 struct nouveau_device *device = nouveau_dev(encoder->dev);
222 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 224 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
223 struct nouveau_connector *nv_connector; 225 struct nouveau_connector *nv_connector;
224 struct drm_device *dev = encoder->dev;
225 u32 max_ac_packet, rekey; 226 u32 max_ac_packet, rekey;
226 227
227 nv_connector = nouveau_encoder_connector_get(nv_encoder); 228 nv_connector = nouveau_encoder_connector_get(nv_encoder);
@@ -238,9 +239,9 @@ nouveau_hdmi_mode_set(struct drm_encoder *encoder,
238 hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */ 239 hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
239 hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ 240 hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
240 241
241 nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ 242 nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
242 nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ 243 nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
243 nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ 244 nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
244 245
245 /* value matches nvidia binary driver, and tegra constant */ 246 /* value matches nvidia binary driver, and tegra constant */
246 rekey = 56; 247 rekey = 56;
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index b87ad3bd7739..a78b24704794 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -23,9 +23,13 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drm.h"
27#include "nouveau_hw.h" 27#include "nouveau_hw.h"
28 28
29#include <subdev/bios/pll.h>
30#include <subdev/clock.h>
31#include <subdev/timer.h>
32
29#define CHIPSET_NFORCE 0x01a0 33#define CHIPSET_NFORCE 0x01a0
30#define CHIPSET_NFORCE2 0x01f0 34#define CHIPSET_NFORCE2 0x01f0
31 35
@@ -82,12 +86,12 @@ NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
82void 86void
83NVSetOwner(struct drm_device *dev, int owner) 87NVSetOwner(struct drm_device *dev, int owner)
84{ 88{
85 struct drm_nouveau_private *dev_priv = dev->dev_private; 89 struct nouveau_drm *drm = nouveau_drm(dev);
86 90
87 if (owner == 1) 91 if (owner == 1)
88 owner *= 3; 92 owner *= 3;
89 93
90 if (dev_priv->chipset == 0x11) { 94 if (nv_device(drm->device)->chipset == 0x11) {
91 /* This might seem stupid, but the blob does it and 95 /* This might seem stupid, but the blob does it and
92 * omitting it often locks the system up. 96 * omitting it often locks the system up.
93 */ 97 */
@@ -98,7 +102,7 @@ NVSetOwner(struct drm_device *dev, int owner)
98 /* CR44 is always changed on CRTC0 */ 102 /* CR44 is always changed on CRTC0 */
99 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); 103 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
100 104
101 if (dev_priv->chipset == 0x11) { /* set me harder */ 105 if (nv_device(drm->device)->chipset == 0x11) { /* set me harder */
102 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); 106 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
103 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); 107 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
104 } 108 }
@@ -123,270 +127,6 @@ NVBlankScreen(struct drm_device *dev, int head, bool blank)
123} 127}
124 128
125/* 129/*
126 * PLL setting
127 */
128
129static int
130powerctrl_1_shift(int chip_version, int reg)
131{
132 int shift = -4;
133
134 if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
135 return shift;
136
137 switch (reg) {
138 case NV_RAMDAC_VPLL2:
139 shift += 4;
140 case NV_PRAMDAC_VPLL_COEFF:
141 shift += 4;
142 case NV_PRAMDAC_MPLL_COEFF:
143 shift += 4;
144 case NV_PRAMDAC_NVPLL_COEFF:
145 shift += 4;
146 }
147
148 /*
149 * the shift for vpll regs is only used for nv3x chips with a single
150 * stage pll
151 */
152 if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
153 chip_version == 0x36 || chip_version >= 0x40))
154 shift = -4;
155
156 return shift;
157}
158
159static void
160setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
161{
162 struct drm_nouveau_private *dev_priv = dev->dev_private;
163 int chip_version = dev_priv->vbios.chip_version;
164 uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
165 int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
166 uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
167 uint32_t saved_powerctrl_1 = 0;
168 int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
169
170 if (oldpll == pll)
171 return; /* already set */
172
173 if (shift_powerctrl_1 >= 0) {
174 saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
175 nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
176 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
177 1 << shift_powerctrl_1);
178 }
179
180 if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
181 /* upclock -- write new post divider first */
182 NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
183 else
184 /* downclock -- write new NM first */
185 NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);
186
187 if (chip_version < 0x17 && chip_version != 0x11)
188 /* wait a bit on older chips */
189 msleep(64);
190 NVReadRAMDAC(dev, 0, reg);
191
192 /* then write the other half as well */
193 NVWriteRAMDAC(dev, 0, reg, pll);
194
195 if (shift_powerctrl_1 >= 0)
196 nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
197}
198
199static uint32_t
200new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
201{
202 bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
203
204 if (ss) /* single stage pll mode */
205 ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
206 NV_RAMDAC_580_VPLL2_ACTIVE;
207 else
208 ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
209 ~NV_RAMDAC_580_VPLL2_ACTIVE;
210
211 return ramdac580;
212}
213
214static void
215setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
216 struct nouveau_pll_vals *pv)
217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 int chip_version = dev_priv->vbios.chip_version;
220 bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
221 uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
222 uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
223 uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
224 uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
225 uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
226 uint32_t oldramdac580 = 0, ramdac580 = 0;
227 bool single_stage = !pv->NM2 || pv->N2 == pv->M2; /* nv41+ only */
228 uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
229 int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
230
231 /* model specific additions to generic pll1 and pll2 set up above */
232 if (nv3035) {
233 pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
234 (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
235 pll2 = 0;
236 }
237 if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
238 oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
239 ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
240 if (oldramdac580 != ramdac580)
241 oldpll1 = ~0; /* force mismatch */
242 if (single_stage)
243 /* magic value used by nvidia in single stage mode */
244 pll2 |= 0x011f;
245 }
246 if (chip_version > 0x70)
247 /* magic bits set by the blob (but not the bios) on g71-73 */
248 pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
249
250 if (oldpll1 == pll1 && oldpll2 == pll2)
251 return; /* already set */
252
253 if (shift_powerctrl_1 >= 0) {
254 saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
255 nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
256 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
257 1 << shift_powerctrl_1);
258 }
259
260 if (chip_version >= 0x40) {
261 int shift_c040 = 14;
262
263 switch (reg1) {
264 case NV_PRAMDAC_MPLL_COEFF:
265 shift_c040 += 2;
266 case NV_PRAMDAC_NVPLL_COEFF:
267 shift_c040 += 2;
268 case NV_RAMDAC_VPLL2:
269 shift_c040 += 2;
270 case NV_PRAMDAC_VPLL_COEFF:
271 shift_c040 += 2;
272 }
273
274 savedc040 = nvReadMC(dev, 0xc040);
275 if (shift_c040 != 14)
276 nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
277 }
278
279 if (oldramdac580 != ramdac580)
280 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);
281
282 if (!nv3035)
283 NVWriteRAMDAC(dev, 0, reg2, pll2);
284 NVWriteRAMDAC(dev, 0, reg1, pll1);
285
286 if (shift_powerctrl_1 >= 0)
287 nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
288 if (chip_version >= 0x40)
289 nvWriteMC(dev, 0xc040, savedc040);
290}
291
292static void
293setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
294 struct nouveau_pll_vals *pv)
295{
296 /* When setting PLLs, there is a merry game of disabling and enabling
297 * various bits of hardware during the process. This function is a
298 * synthesis of six nv4x traces, nearly each card doing a subtly
299 * different thing. With luck all the necessary bits for each card are
300 * combined herein. Without luck it deviates from each card's formula
301 * so as to not work on any :)
302 */
303
304 uint32_t Preg = NMNMreg - 4;
305 bool mpll = Preg == 0x4020;
306 uint32_t oldPval = nvReadMC(dev, Preg);
307 uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
308 uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
309 0xc << 28 | pv->log2P << 16;
310 uint32_t saved4600 = 0;
311 /* some cards have different maskc040s */
312 uint32_t maskc040 = ~(3 << 14), savedc040;
313 bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
314
315 if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
316 return;
317
318 if (Preg == 0x4000)
319 maskc040 = ~0x333;
320 if (Preg == 0x4058)
321 maskc040 = ~(0xc << 24);
322
323 if (mpll) {
324 struct pll_lims pll_lim;
325 uint8_t Pval2;
326
327 if (get_pll_limits(dev, Preg, &pll_lim))
328 return;
329
330 Pval2 = pv->log2P + pll_lim.log2p_bias;
331 if (Pval2 > pll_lim.max_log2p)
332 Pval2 = pll_lim.max_log2p;
333 Pval |= 1 << 28 | Pval2 << 20;
334
335 saved4600 = nvReadMC(dev, 0x4600);
336 nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
337 }
338 if (single_stage)
339 Pval |= mpll ? 1 << 12 : 1 << 8;
340
341 nvWriteMC(dev, Preg, oldPval | 1 << 28);
342 nvWriteMC(dev, Preg, Pval & ~(4 << 28));
343 if (mpll) {
344 Pval |= 8 << 20;
345 nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
346 nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
347 }
348
349 savedc040 = nvReadMC(dev, 0xc040);
350 nvWriteMC(dev, 0xc040, savedc040 & maskc040);
351
352 nvWriteMC(dev, NMNMreg, NMNM);
353 if (NMNMreg == 0x4024)
354 nvWriteMC(dev, 0x403c, NMNM);
355
356 nvWriteMC(dev, Preg, Pval);
357 if (mpll) {
358 Pval &= ~(8 << 20);
359 nvWriteMC(dev, 0x4020, Pval);
360 nvWriteMC(dev, 0x4038, Pval);
361 nvWriteMC(dev, 0x4600, saved4600);
362 }
363
364 nvWriteMC(dev, 0xc040, savedc040);
365
366 if (mpll) {
367 nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
368 nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
369 }
370}
371
372void
373nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
374 struct nouveau_pll_vals *pv)
375{
376 struct drm_nouveau_private *dev_priv = dev->dev_private;
377 int cv = dev_priv->vbios.chip_version;
378
379 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
380 cv >= 0x40) {
381 if (reg1 > 0x405c)
382 setPLL_double_highregs(dev, reg1, pv);
383 else
384 setPLL_double_lowregs(dev, reg1, pv);
385 } else
386 setPLL_single(dev, reg1, pv);
387}
388
389/*
390 * PLL getting 130 * PLL getting
391 */ 131 */
392 132
@@ -394,7 +134,7 @@ static void
394nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1, 134nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
395 uint32_t pll2, struct nouveau_pll_vals *pllvals) 135 uint32_t pll2, struct nouveau_pll_vals *pllvals)
396{ 136{
397 struct drm_nouveau_private *dev_priv = dev->dev_private; 137 struct nouveau_drm *drm = nouveau_drm(dev);
398 138
399 /* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */ 139 /* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
400 140
@@ -411,7 +151,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
411 pllvals->NM1 = pll1 & 0xffff; 151 pllvals->NM1 = pll1 & 0xffff;
412 if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) 152 if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
413 pllvals->NM2 = pll2 & 0xffff; 153 pllvals->NM2 = pll2 & 0xffff;
414 else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) { 154 else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) {
415 pllvals->M1 &= 0xf; /* only 4 bits */ 155 pllvals->M1 &= 0xf; /* only 4 bits */
416 if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { 156 if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
417 pllvals->M2 = (pll1 >> 4) & 0x7; 157 pllvals->M2 = (pll1 >> 4) & 0x7;
@@ -423,28 +163,30 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
423} 163}
424 164
425int 165int
426nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype, 166nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
427 struct nouveau_pll_vals *pllvals) 167 struct nouveau_pll_vals *pllvals)
428{ 168{
429 struct drm_nouveau_private *dev_priv = dev->dev_private; 169 struct nouveau_drm *drm = nouveau_drm(dev);
430 uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0; 170 struct nouveau_device *device = nv_device(drm->device);
431 struct pll_lims pll_lim; 171 struct nouveau_bios *bios = nouveau_bios(device);
172 uint32_t reg1, pll1, pll2 = 0;
173 struct nvbios_pll pll_lim;
432 int ret; 174 int ret;
433 175
434 if (reg1 == 0) 176 ret = nvbios_pll_parse(bios, plltype, &pll_lim);
177 if (ret || !(reg1 = pll_lim.reg))
435 return -ENOENT; 178 return -ENOENT;
436 179
437 pll1 = nvReadMC(dev, reg1); 180 pll1 = nv_rd32(device, reg1);
438
439 if (reg1 <= 0x405c) 181 if (reg1 <= 0x405c)
440 pll2 = nvReadMC(dev, reg1 + 4); 182 pll2 = nv_rd32(device, reg1 + 4);
441 else if (nv_two_reg_pll(dev)) { 183 else if (nv_two_reg_pll(dev)) {
442 uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70); 184 uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
443 185
444 pll2 = nvReadMC(dev, reg2); 186 pll2 = nv_rd32(device, reg2);
445 } 187 }
446 188
447 if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { 189 if (nv_device(drm->device)->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
448 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); 190 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
449 191
450 /* check whether vpll has been forced into single stage mode */ 192 /* check whether vpll has been forced into single stage mode */
@@ -457,13 +199,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
457 } 199 }
458 200
459 nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals); 201 nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
460
461 ret = get_pll_limits(dev, plltype, &pll_lim);
462 if (ret)
463 return ret;
464
465 pllvals->refclk = pll_lim.refclk; 202 pllvals->refclk = pll_lim.refclk;
466
467 return 0; 203 return 0;
468} 204}
469 205
@@ -478,7 +214,7 @@ nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
478} 214}
479 215
480int 216int
481nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype) 217nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
482{ 218{
483 struct nouveau_pll_vals pllvals; 219 struct nouveau_pll_vals pllvals;
484 int ret; 220 int ret;
@@ -517,26 +253,30 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
517 * when such a condition detected. only seen on nv11 to date 253 * when such a condition detected. only seen on nv11 to date
518 */ 254 */
519 255
520 struct pll_lims pll_lim; 256 struct nouveau_drm *drm = nouveau_drm(dev);
257 struct nouveau_device *device = nv_device(drm->device);
258 struct nouveau_clock *clk = nouveau_clock(device);
259 struct nouveau_bios *bios = nouveau_bios(device);
260 struct nvbios_pll pll_lim;
521 struct nouveau_pll_vals pv; 261 struct nouveau_pll_vals pv;
522 enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0; 262 enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
523 263
524 if (get_pll_limits(dev, pll, &pll_lim)) 264 if (nvbios_pll_parse(bios, pll, &pll_lim))
525 return; 265 return;
526 nouveau_hw_get_pllvals(dev, pll, &pv); 266 nouveau_hw_get_pllvals(dev, pll, &pv);
527 267
528 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && 268 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
529 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && 269 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
530 pv.log2P <= pll_lim.max_log2p) 270 pv.log2P <= pll_lim.max_p)
531 return; 271 return;
532 272
533 NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1); 273 NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1);
534 274
535 /* set lowest clock within static limits */ 275 /* set lowest clock within static limits */
536 pv.M1 = pll_lim.vco1.max_m; 276 pv.M1 = pll_lim.vco1.max_m;
537 pv.N1 = pll_lim.vco1.min_n; 277 pv.N1 = pll_lim.vco1.min_n;
538 pv.log2P = pll_lim.max_usable_log2p; 278 pv.log2P = pll_lim.max_p_usable;
539 nouveau_hw_setpll(dev, pll_lim.reg, &pv); 279 clk->pll_prog(clk, pll_lim.reg, &pv);
540} 280}
541 281
542/* 282/*
@@ -547,17 +287,16 @@ static void nouveau_vga_font_io(struct drm_device *dev,
547 void __iomem *iovram, 287 void __iomem *iovram,
548 bool save, unsigned plane) 288 bool save, unsigned plane)
549{ 289{
550 struct drm_nouveau_private *dev_priv = dev->dev_private;
551 unsigned i; 290 unsigned i;
552 291
553 NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane); 292 NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
554 NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane); 293 NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
555 for (i = 0; i < 16384; i++) { 294 for (i = 0; i < 16384; i++) {
556 if (save) { 295 if (save) {
557 dev_priv->saved_vga_font[plane][i] = 296 nv04_display(dev)->saved_vga_font[plane][i] =
558 ioread32_native(iovram + i * 4); 297 ioread32_native(iovram + i * 4);
559 } else { 298 } else {
560 iowrite32_native(dev_priv->saved_vga_font[plane][i], 299 iowrite32_native(nv04_display(dev)->saved_vga_font[plane][i],
561 iovram + i * 4); 300 iovram + i * 4);
562 } 301 }
563 } 302 }
@@ -566,6 +305,7 @@ static void nouveau_vga_font_io(struct drm_device *dev,
566void 305void
567nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save) 306nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
568{ 307{
308 struct nouveau_drm *drm = nouveau_drm(dev);
569 uint8_t misc, gr4, gr5, gr6, seq2, seq4; 309 uint8_t misc, gr4, gr5, gr6, seq2, seq4;
570 bool graphicsmode; 310 bool graphicsmode;
571 unsigned plane; 311 unsigned plane;
@@ -581,12 +321,12 @@ nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
581 if (graphicsmode) /* graphics mode => framebuffer => no need to save */ 321 if (graphicsmode) /* graphics mode => framebuffer => no need to save */
582 return; 322 return;
583 323
584 NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor"); 324 NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor");
585 325
586 /* map first 64KiB of VRAM, holds VGA fonts etc */ 326 /* map first 64KiB of VRAM, holds VGA fonts etc */
587 iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536); 327 iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
588 if (!iovram) { 328 if (!iovram) {
589 NV_ERROR(dev, "Failed to map VRAM, " 329 NV_ERROR(drm, "Failed to map VRAM, "
590 "cannot save/restore VGA fonts.\n"); 330 "cannot save/restore VGA fonts.\n");
591 return; 331 return;
592 } 332 }
@@ -649,25 +389,25 @@ static void
649nv_save_state_ramdac(struct drm_device *dev, int head, 389nv_save_state_ramdac(struct drm_device *dev, int head,
650 struct nv04_mode_state *state) 390 struct nv04_mode_state *state)
651{ 391{
652 struct drm_nouveau_private *dev_priv = dev->dev_private; 392 struct nouveau_drm *drm = nouveau_drm(dev);
653 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 393 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
654 int i; 394 int i;
655 395
656 if (dev_priv->card_type >= NV_10) 396 if (nv_device(drm->device)->card_type >= NV_10)
657 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); 397 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
658 398
659 nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals); 399 nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
660 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); 400 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
661 if (nv_two_heads(dev)) 401 if (nv_two_heads(dev))
662 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); 402 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
663 if (dev_priv->chipset == 0x11) 403 if (nv_device(drm->device)->chipset == 0x11)
664 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); 404 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
665 405
666 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); 406 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
667 407
668 if (nv_gf4_disp_arch(dev)) 408 if (nv_gf4_disp_arch(dev))
669 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); 409 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
670 if (dev_priv->chipset >= 0x30) 410 if (nv_device(drm->device)->chipset >= 0x30)
671 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); 411 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
672 412
673 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); 413 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
@@ -709,7 +449,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
709 if (nv_gf4_disp_arch(dev)) 449 if (nv_gf4_disp_arch(dev))
710 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); 450 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
711 451
712 if (dev_priv->card_type == NV_40) { 452 if (nv_device(drm->device)->card_type == NV_40) {
713 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); 453 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
714 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); 454 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
715 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); 455 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
@@ -724,26 +464,27 @@ static void
724nv_load_state_ramdac(struct drm_device *dev, int head, 464nv_load_state_ramdac(struct drm_device *dev, int head,
725 struct nv04_mode_state *state) 465 struct nv04_mode_state *state)
726{ 466{
727 struct drm_nouveau_private *dev_priv = dev->dev_private; 467 struct nouveau_drm *drm = nouveau_drm(dev);
468 struct nouveau_clock *clk = nouveau_clock(drm->device);
728 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 469 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
729 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; 470 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
730 int i; 471 int i;
731 472
732 if (dev_priv->card_type >= NV_10) 473 if (nv_device(drm->device)->card_type >= NV_10)
733 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); 474 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
734 475
735 nouveau_hw_setpll(dev, pllreg, &regp->pllvals); 476 clk->pll_prog(clk, pllreg, &regp->pllvals);
736 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); 477 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
737 if (nv_two_heads(dev)) 478 if (nv_two_heads(dev))
738 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); 479 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
739 if (dev_priv->chipset == 0x11) 480 if (nv_device(drm->device)->chipset == 0x11)
740 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); 481 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
741 482
742 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); 483 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
743 484
744 if (nv_gf4_disp_arch(dev)) 485 if (nv_gf4_disp_arch(dev))
745 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); 486 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
746 if (dev_priv->chipset >= 0x30) 487 if (nv_device(drm->device)->chipset >= 0x30)
747 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); 488 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
748 489
749 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); 490 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
@@ -780,7 +521,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
780 if (nv_gf4_disp_arch(dev)) 521 if (nv_gf4_disp_arch(dev))
781 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); 522 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
782 523
783 if (dev_priv->card_type == NV_40) { 524 if (nv_device(drm->device)->card_type == NV_40) {
784 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); 525 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
785 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); 526 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
786 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); 527 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
@@ -845,7 +586,7 @@ static void
845nv_save_state_ext(struct drm_device *dev, int head, 586nv_save_state_ext(struct drm_device *dev, int head,
846 struct nv04_mode_state *state) 587 struct nv04_mode_state *state)
847{ 588{
848 struct drm_nouveau_private *dev_priv = dev->dev_private; 589 struct nouveau_drm *drm = nouveau_drm(dev);
849 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 590 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
850 int i; 591 int i;
851 592
@@ -861,10 +602,10 @@ nv_save_state_ext(struct drm_device *dev, int head,
861 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); 602 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
862 rd_cio_state(dev, head, regp, NV_CIO_CRE_21); 603 rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
863 604
864 if (dev_priv->card_type >= NV_20) 605 if (nv_device(drm->device)->card_type >= NV_20)
865 rd_cio_state(dev, head, regp, NV_CIO_CRE_47); 606 rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
866 607
867 if (dev_priv->card_type >= NV_30) 608 if (nv_device(drm->device)->card_type >= NV_30)
868 rd_cio_state(dev, head, regp, 0x9f); 609 rd_cio_state(dev, head, regp, 0x9f);
869 610
870 rd_cio_state(dev, head, regp, NV_CIO_CRE_49); 611 rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
@@ -873,14 +614,14 @@ nv_save_state_ext(struct drm_device *dev, int head,
873 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 614 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
874 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); 615 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
875 616
876 if (dev_priv->card_type >= NV_10) { 617 if (nv_device(drm->device)->card_type >= NV_10) {
877 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); 618 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
878 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); 619 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
879 620
880 if (dev_priv->card_type >= NV_30) 621 if (nv_device(drm->device)->card_type >= NV_30)
881 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); 622 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
882 623
883 if (dev_priv->card_type == NV_40) 624 if (nv_device(drm->device)->card_type == NV_40)
884 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); 625 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
885 626
886 if (nv_two_heads(dev)) 627 if (nv_two_heads(dev))
@@ -892,7 +633,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
892 633
893 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); 634 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
894 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); 635 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
895 if (dev_priv->card_type >= NV_10) { 636 if (nv_device(drm->device)->card_type >= NV_10) {
896 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); 637 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
897 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); 638 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
898 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); 639 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -920,12 +661,14 @@ static void
920nv_load_state_ext(struct drm_device *dev, int head, 661nv_load_state_ext(struct drm_device *dev, int head,
921 struct nv04_mode_state *state) 662 struct nv04_mode_state *state)
922{ 663{
923 struct drm_nouveau_private *dev_priv = dev->dev_private; 664 struct nouveau_drm *drm = nouveau_drm(dev);
665 struct nouveau_device *device = nv_device(drm->device);
666 struct nouveau_timer *ptimer = nouveau_timer(device);
924 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 667 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
925 uint32_t reg900; 668 uint32_t reg900;
926 int i; 669 int i;
927 670
928 if (dev_priv->card_type >= NV_10) { 671 if (nv_device(drm->device)->card_type >= NV_10) {
929 if (nv_two_heads(dev)) 672 if (nv_two_heads(dev))
930 /* setting ENGINE_CTRL (EC) *must* come before 673 /* setting ENGINE_CTRL (EC) *must* come before
931 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in 674 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
@@ -933,24 +676,24 @@ nv_load_state_ext(struct drm_device *dev, int head,
933 */ 676 */
934 NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); 677 NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
935 678
936 nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1); 679 nv_wr32(device, NV_PVIDEO_STOP, 1);
937 nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0); 680 nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
938 nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0); 681 nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
939 nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0); 682 nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
940 nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1); 683 nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1);
941 nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1); 684 nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1);
942 nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1); 685 nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1);
943 nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1); 686 nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1);
944 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0); 687 nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
945 688
946 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); 689 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
947 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); 690 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
948 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); 691 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
949 692
950 if (dev_priv->card_type >= NV_30) 693 if (nv_device(drm->device)->card_type >= NV_30)
951 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); 694 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
952 695
953 if (dev_priv->card_type == NV_40) { 696 if (nv_device(drm->device)->card_type == NV_40) {
954 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); 697 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
955 698
956 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); 699 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
@@ -973,23 +716,23 @@ nv_load_state_ext(struct drm_device *dev, int head,
973 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); 716 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
974 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); 717 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
975 718
976 if (dev_priv->card_type >= NV_20) 719 if (nv_device(drm->device)->card_type >= NV_20)
977 wr_cio_state(dev, head, regp, NV_CIO_CRE_47); 720 wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
978 721
979 if (dev_priv->card_type >= NV_30) 722 if (nv_device(drm->device)->card_type >= NV_30)
980 wr_cio_state(dev, head, regp, 0x9f); 723 wr_cio_state(dev, head, regp, 0x9f);
981 724
982 wr_cio_state(dev, head, regp, NV_CIO_CRE_49); 725 wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
983 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); 726 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
984 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); 727 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
985 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 728 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
986 if (dev_priv->card_type == NV_40) 729 if (nv_device(drm->device)->card_type == NV_40)
987 nv_fix_nv40_hw_cursor(dev, head); 730 nv_fix_nv40_hw_cursor(dev, head);
988 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); 731 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
989 732
990 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); 733 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
991 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); 734 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
992 if (dev_priv->card_type >= NV_10) { 735 if (nv_device(drm->device)->card_type >= NV_10) {
993 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); 736 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
994 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); 737 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
995 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); 738 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -997,11 +740,11 @@ nv_load_state_ext(struct drm_device *dev, int head,
997 } 740 }
998 /* NV11 and NV20 stop at 0x52. */ 741 /* NV11 and NV20 stop at 0x52. */
999 if (nv_gf4_disp_arch(dev)) { 742 if (nv_gf4_disp_arch(dev)) {
1000 if (dev_priv->card_type == NV_10) { 743 if (nv_device(drm->device)->card_type == NV_10) {
1001 /* Not waiting for vertical retrace before modifying 744 /* Not waiting for vertical retrace before modifying
1002 CRE_53/CRE_54 causes lockups. */ 745 CRE_53/CRE_54 causes lockups. */
1003 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); 746 nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
1004 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); 747 nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
1005 } 748 }
1006 749
1007 wr_cio_state(dev, head, regp, NV_CIO_CRE_42); 750 wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
@@ -1024,14 +767,15 @@ static void
1024nv_save_state_palette(struct drm_device *dev, int head, 767nv_save_state_palette(struct drm_device *dev, int head,
1025 struct nv04_mode_state *state) 768 struct nv04_mode_state *state)
1026{ 769{
770 struct nouveau_device *device = nouveau_dev(dev);
1027 int head_offset = head * NV_PRMDIO_SIZE, i; 771 int head_offset = head * NV_PRMDIO_SIZE, i;
1028 772
1029 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset, 773 nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
1030 NV_PRMDIO_PIXEL_MASK_MASK); 774 NV_PRMDIO_PIXEL_MASK_MASK);
1031 nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0); 775 nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
1032 776
1033 for (i = 0; i < 768; i++) { 777 for (i = 0; i < 768; i++) {
1034 state->crtc_reg[head].DAC[i] = nv_rd08(dev, 778 state->crtc_reg[head].DAC[i] = nv_rd08(device,
1035 NV_PRMDIO_PALETTE_DATA + head_offset); 779 NV_PRMDIO_PALETTE_DATA + head_offset);
1036 } 780 }
1037 781
@@ -1042,14 +786,15 @@ void
1042nouveau_hw_load_state_palette(struct drm_device *dev, int head, 786nouveau_hw_load_state_palette(struct drm_device *dev, int head,
1043 struct nv04_mode_state *state) 787 struct nv04_mode_state *state)
1044{ 788{
789 struct nouveau_device *device = nouveau_dev(dev);
1045 int head_offset = head * NV_PRMDIO_SIZE, i; 790 int head_offset = head * NV_PRMDIO_SIZE, i;
1046 791
1047 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset, 792 nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
1048 NV_PRMDIO_PIXEL_MASK_MASK); 793 NV_PRMDIO_PIXEL_MASK_MASK);
1049 nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0); 794 nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
1050 795
1051 for (i = 0; i < 768; i++) { 796 for (i = 0; i < 768; i++) {
1052 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset, 797 nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
1053 state->crtc_reg[head].DAC[i]); 798 state->crtc_reg[head].DAC[i]);
1054 } 799 }
1055 800
@@ -1059,9 +804,9 @@ nouveau_hw_load_state_palette(struct drm_device *dev, int head,
1059void nouveau_hw_save_state(struct drm_device *dev, int head, 804void nouveau_hw_save_state(struct drm_device *dev, int head,
1060 struct nv04_mode_state *state) 805 struct nv04_mode_state *state)
1061{ 806{
1062 struct drm_nouveau_private *dev_priv = dev->dev_private; 807 struct nouveau_drm *drm = nouveau_drm(dev);
1063 808
1064 if (dev_priv->chipset == 0x11) 809 if (nv_device(drm->device)->chipset == 0x11)
1065 /* NB: no attempt is made to restore the bad pll later on */ 810 /* NB: no attempt is made to restore the bad pll later on */
1066 nouveau_hw_fix_bad_vpll(dev, head); 811 nouveau_hw_fix_bad_vpll(dev, head);
1067 nv_save_state_ramdac(dev, head, state); 812 nv_save_state_ramdac(dev, head, state);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
index 2989090b9434..ba8fc0f9e0db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -24,7 +24,9 @@
24#define __NOUVEAU_HW_H__ 24#define __NOUVEAU_HW_H__
25 25
26#include "drmP.h" 26#include "drmP.h"
27#include "nouveau_drv.h" 27#include "nv04_display.h"
28
29#include <subdev/bios/pll.h>
28 30
29#define MASK(field) ( \ 31#define MASK(field) ( \
30 (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field)) 32 (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
@@ -38,12 +40,10 @@ void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
38uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index); 40uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
39void NVSetOwner(struct drm_device *, int owner); 41void NVSetOwner(struct drm_device *, int owner);
40void NVBlankScreen(struct drm_device *, int head, bool blank); 42void NVBlankScreen(struct drm_device *, int head, bool blank);
41void nouveau_hw_setpll(struct drm_device *, uint32_t reg1, 43int nouveau_hw_get_pllvals(struct drm_device *, enum nvbios_pll_type plltype,
42 struct nouveau_pll_vals *pv);
43int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype,
44 struct nouveau_pll_vals *pllvals); 44 struct nouveau_pll_vals *pllvals);
45int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals); 45int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
46int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype); 46int nouveau_hw_get_clock(struct drm_device *, enum nvbios_pll_type plltype);
47void nouveau_hw_save_vga_fonts(struct drm_device *, bool save); 47void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
48void nouveau_hw_save_state(struct drm_device *, int head, 48void nouveau_hw_save_state(struct drm_device *, int head,
49 struct nv04_mode_state *state); 49 struct nv04_mode_state *state);
@@ -55,115 +55,51 @@ void nouveau_hw_load_state_palette(struct drm_device *, int head,
55/* nouveau_calc.c */ 55/* nouveau_calc.c */
56extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp, 56extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
57 int *burst, int *lwm); 57 int *burst, int *lwm);
58extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim,
59 int clk, struct nouveau_pll_vals *pv);
60
61static inline uint32_t
62nvReadMC(struct drm_device *dev, uint32_t reg)
63{
64 uint32_t val = nv_rd32(dev, reg);
65 NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
66 return val;
67}
68
69static inline void
70nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val)
71{
72 NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
73 nv_wr32(dev, reg, val);
74}
75
76static inline uint32_t
77nvReadVIDEO(struct drm_device *dev, uint32_t reg)
78{
79 uint32_t val = nv_rd32(dev, reg);
80 NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
81 return val;
82}
83
84static inline void
85nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val)
86{
87 NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
88 nv_wr32(dev, reg, val);
89}
90
91static inline uint32_t
92nvReadFB(struct drm_device *dev, uint32_t reg)
93{
94 uint32_t val = nv_rd32(dev, reg);
95 NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
96 return val;
97}
98
99static inline void
100nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val)
101{
102 NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
103 nv_wr32(dev, reg, val);
104}
105
106static inline uint32_t
107nvReadEXTDEV(struct drm_device *dev, uint32_t reg)
108{
109 uint32_t val = nv_rd32(dev, reg);
110 NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
111 return val;
112}
113
114static inline void
115nvWriteEXTDEV(struct drm_device *dev, uint32_t reg, uint32_t val)
116{
117 NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
118 nv_wr32(dev, reg, val);
119}
120 58
121static inline uint32_t NVReadCRTC(struct drm_device *dev, 59static inline uint32_t NVReadCRTC(struct drm_device *dev,
122 int head, uint32_t reg) 60 int head, uint32_t reg)
123{ 61{
62 struct nouveau_device *device = nouveau_dev(dev);
124 uint32_t val; 63 uint32_t val;
125 if (head) 64 if (head)
126 reg += NV_PCRTC0_SIZE; 65 reg += NV_PCRTC0_SIZE;
127 val = nv_rd32(dev, reg); 66 val = nv_rd32(device, reg);
128 NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
129 return val; 67 return val;
130} 68}
131 69
132static inline void NVWriteCRTC(struct drm_device *dev, 70static inline void NVWriteCRTC(struct drm_device *dev,
133 int head, uint32_t reg, uint32_t val) 71 int head, uint32_t reg, uint32_t val)
134{ 72{
73 struct nouveau_device *device = nouveau_dev(dev);
135 if (head) 74 if (head)
136 reg += NV_PCRTC0_SIZE; 75 reg += NV_PCRTC0_SIZE;
137 NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val); 76 nv_wr32(device, reg, val);
138 nv_wr32(dev, reg, val);
139} 77}
140 78
141static inline uint32_t NVReadRAMDAC(struct drm_device *dev, 79static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
142 int head, uint32_t reg) 80 int head, uint32_t reg)
143{ 81{
82 struct nouveau_device *device = nouveau_dev(dev);
144 uint32_t val; 83 uint32_t val;
145 if (head) 84 if (head)
146 reg += NV_PRAMDAC0_SIZE; 85 reg += NV_PRAMDAC0_SIZE;
147 val = nv_rd32(dev, reg); 86 val = nv_rd32(device, reg);
148 NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
149 head, reg, val);
150 return val; 87 return val;
151} 88}
152 89
153static inline void NVWriteRAMDAC(struct drm_device *dev, 90static inline void NVWriteRAMDAC(struct drm_device *dev,
154 int head, uint32_t reg, uint32_t val) 91 int head, uint32_t reg, uint32_t val)
155{ 92{
93 struct nouveau_device *device = nouveau_dev(dev);
156 if (head) 94 if (head)
157 reg += NV_PRAMDAC0_SIZE; 95 reg += NV_PRAMDAC0_SIZE;
158 NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n", 96 nv_wr32(device, reg, val);
159 head, reg, val);
160 nv_wr32(dev, reg, val);
161} 97}
162 98
163static inline uint8_t nv_read_tmds(struct drm_device *dev, 99static inline uint8_t nv_read_tmds(struct drm_device *dev,
164 int or, int dl, uint8_t address) 100 int or, int dl, uint8_t address)
165{ 101{
166 int ramdac = (or & OUTPUT_C) >> 2; 102 int ramdac = (or & DCB_OUTPUT_C) >> 2;
167 103
168 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, 104 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
169 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address); 105 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
@@ -174,7 +110,7 @@ static inline void nv_write_tmds(struct drm_device *dev,
174 int or, int dl, uint8_t address, 110 int or, int dl, uint8_t address,
175 uint8_t data) 111 uint8_t data)
176{ 112{
177 int ramdac = (or & OUTPUT_C) >> 2; 113 int ramdac = (or & DCB_OUTPUT_C) >> 2;
178 114
179 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data); 115 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
180 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address); 116 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
@@ -183,20 +119,18 @@ static inline void nv_write_tmds(struct drm_device *dev,
183static inline void NVWriteVgaCrtc(struct drm_device *dev, 119static inline void NVWriteVgaCrtc(struct drm_device *dev,
184 int head, uint8_t index, uint8_t value) 120 int head, uint8_t index, uint8_t value)
185{ 121{
186 NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n", 122 struct nouveau_device *device = nouveau_dev(dev);
187 head, index, value); 123 nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
188 nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); 124 nv_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
189 nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
190} 125}
191 126
192static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, 127static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
193 int head, uint8_t index) 128 int head, uint8_t index)
194{ 129{
130 struct nouveau_device *device = nouveau_dev(dev);
195 uint8_t val; 131 uint8_t val;
196 nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); 132 nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
197 val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); 133 val = nv_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
198 NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
199 head, index, val);
200 return val; 134 return val;
201} 135}
202 136
@@ -230,75 +164,74 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
230static inline uint8_t NVReadPRMVIO(struct drm_device *dev, 164static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
231 int head, uint32_t reg) 165 int head, uint32_t reg)
232{ 166{
233 struct drm_nouveau_private *dev_priv = dev->dev_private; 167 struct nouveau_device *device = nouveau_dev(dev);
168 struct nouveau_drm *drm = nouveau_drm(dev);
234 uint8_t val; 169 uint8_t val;
235 170
236 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call 171 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
237 * NVSetOwner for the relevant head to be programmed */ 172 * NVSetOwner for the relevant head to be programmed */
238 if (head && dev_priv->card_type == NV_40) 173 if (head && nv_device(drm->device)->card_type == NV_40)
239 reg += NV_PRMVIO_SIZE; 174 reg += NV_PRMVIO_SIZE;
240 175
241 val = nv_rd08(dev, reg); 176 val = nv_rd08(device, reg);
242 NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val);
243 return val; 177 return val;
244} 178}
245 179
246static inline void NVWritePRMVIO(struct drm_device *dev, 180static inline void NVWritePRMVIO(struct drm_device *dev,
247 int head, uint32_t reg, uint8_t value) 181 int head, uint32_t reg, uint8_t value)
248{ 182{
249 struct drm_nouveau_private *dev_priv = dev->dev_private; 183 struct nouveau_device *device = nouveau_dev(dev);
184 struct nouveau_drm *drm = nouveau_drm(dev);
250 185
251 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call 186 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
252 * NVSetOwner for the relevant head to be programmed */ 187 * NVSetOwner for the relevant head to be programmed */
253 if (head && dev_priv->card_type == NV_40) 188 if (head && nv_device(drm->device)->card_type == NV_40)
254 reg += NV_PRMVIO_SIZE; 189 reg += NV_PRMVIO_SIZE;
255 190
256 NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", 191 nv_wr08(device, reg, value);
257 head, reg, value);
258 nv_wr08(dev, reg, value);
259} 192}
260 193
261static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) 194static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
262{ 195{
263 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 196 struct nouveau_device *device = nouveau_dev(dev);
264 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); 197 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
198 nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
265} 199}
266 200
267static inline bool NVGetEnablePalette(struct drm_device *dev, int head) 201static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
268{ 202{
269 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 203 struct nouveau_device *device = nouveau_dev(dev);
270 return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); 204 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
205 return !(nv_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
271} 206}
272 207
273static inline void NVWriteVgaAttr(struct drm_device *dev, 208static inline void NVWriteVgaAttr(struct drm_device *dev,
274 int head, uint8_t index, uint8_t value) 209 int head, uint8_t index, uint8_t value)
275{ 210{
211 struct nouveau_device *device = nouveau_dev(dev);
276 if (NVGetEnablePalette(dev, head)) 212 if (NVGetEnablePalette(dev, head))
277 index &= ~0x20; 213 index &= ~0x20;
278 else 214 else
279 index |= 0x20; 215 index |= 0x20;
280 216
281 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 217 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
282 NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n", 218 nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
283 head, index, value); 219 nv_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
284 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
285 nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
286} 220}
287 221
288static inline uint8_t NVReadVgaAttr(struct drm_device *dev, 222static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
289 int head, uint8_t index) 223 int head, uint8_t index)
290{ 224{
225 struct nouveau_device *device = nouveau_dev(dev);
291 uint8_t val; 226 uint8_t val;
292 if (NVGetEnablePalette(dev, head)) 227 if (NVGetEnablePalette(dev, head))
293 index &= ~0x20; 228 index &= ~0x20;
294 else 229 else
295 index |= 0x20; 230 index |= 0x20;
296 231
297 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 232 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
298 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); 233 nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
299 val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); 234 val = nv_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
300 NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
301 head, index, val);
302 return val; 235 return val;
303} 236}
304 237
@@ -325,10 +258,11 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
325static inline bool 258static inline bool
326nv_heads_tied(struct drm_device *dev) 259nv_heads_tied(struct drm_device *dev)
327{ 260{
328 struct drm_nouveau_private *dev_priv = dev->dev_private; 261 struct nouveau_device *device = nouveau_dev(dev);
262 struct nouveau_drm *drm = nouveau_drm(dev);
329 263
330 if (dev_priv->chipset == 0x11) 264 if (nv_device(drm->device)->chipset == 0x11)
331 return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)); 265 return !!(nv_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
332 266
333 return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; 267 return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
334} 268}
@@ -377,13 +311,13 @@ nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
377static inline bool 311static inline bool
378NVLockVgaCrtcs(struct drm_device *dev, bool lock) 312NVLockVgaCrtcs(struct drm_device *dev, bool lock)
379{ 313{
380 struct drm_nouveau_private *dev_priv = dev->dev_private; 314 struct nouveau_drm *drm = nouveau_drm(dev);
381 bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX); 315 bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
382 316
383 NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, 317 NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
384 lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); 318 lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
385 /* NV11 has independently lockable extended crtcs, except when tied */ 319 /* NV11 has independently lockable extended crtcs, except when tied */
386 if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev)) 320 if (nv_device(drm->device)->chipset == 0x11 && !nv_heads_tied(dev))
387 NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, 321 NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
388 lock ? NV_CIO_SR_LOCK_VALUE : 322 lock ? NV_CIO_SR_LOCK_VALUE :
389 NV_CIO_SR_UNLOCK_RW_VALUE); 323 NV_CIO_SR_UNLOCK_RW_VALUE);
@@ -398,9 +332,9 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock)
398 332
399static inline int nv_cursor_width(struct drm_device *dev) 333static inline int nv_cursor_width(struct drm_device *dev)
400{ 334{
401 struct drm_nouveau_private *dev_priv = dev->dev_private; 335 struct nouveau_drm *drm = nouveau_drm(dev);
402 336
403 return dev_priv->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; 337 return nv_device(drm->device)->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
404} 338}
405 339
406static inline void 340static inline void
@@ -418,11 +352,11 @@ nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
418static inline void 352static inline void
419nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) 353nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
420{ 354{
421 struct drm_nouveau_private *dev_priv = dev->dev_private; 355 struct nouveau_drm *drm = nouveau_drm(dev);
422 356
423 NVWriteCRTC(dev, head, NV_PCRTC_START, offset); 357 NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
424 358
425 if (dev_priv->card_type == NV_04) { 359 if (nv_device(drm->device)->card_type == NV_04) {
426 /* 360 /*
427 * Hilarious, the 24th bit doesn't want to stick to 361 * Hilarious, the 24th bit doesn't want to stick to
428 * PCRTC_START... 362 * PCRTC_START...
@@ -437,9 +371,9 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
437static inline void 371static inline void
438nv_show_cursor(struct drm_device *dev, int head, bool show) 372nv_show_cursor(struct drm_device *dev, int head, bool show)
439{ 373{
440 struct drm_nouveau_private *dev_priv = dev->dev_private; 374 struct nouveau_drm *drm = nouveau_drm(dev);
441 uint8_t *curctl1 = 375 uint8_t *curctl1 =
442 &dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX]; 376 &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
443 377
444 if (show) 378 if (show)
445 *curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); 379 *curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
@@ -447,14 +381,14 @@ nv_show_cursor(struct drm_device *dev, int head, bool show)
447 *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); 381 *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
448 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); 382 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
449 383
450 if (dev_priv->card_type == NV_40) 384 if (nv_device(drm->device)->card_type == NV_40)
451 nv_fix_nv40_hw_cursor(dev, head); 385 nv_fix_nv40_hw_cursor(dev, head);
452} 386}
453 387
454static inline uint32_t 388static inline uint32_t
455nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp) 389nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
456{ 390{
457 struct drm_nouveau_private *dev_priv = dev->dev_private; 391 struct nouveau_drm *drm = nouveau_drm(dev);
458 int mask; 392 int mask;
459 393
460 if (bpp == 15) 394 if (bpp == 15)
@@ -463,7 +397,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
463 bpp = 8; 397 bpp = 8;
464 398
465 /* Alignment requirements taken from the Haiku driver */ 399 /* Alignment requirements taken from the Haiku driver */
466 if (dev_priv->card_type == NV_04) 400 if (nv_device(drm->device)->card_type == NV_04)
467 mask = 128 / bpp - 1; 401 mask = 128 / bpp - 1;
468 else 402 else
469 mask = 512 / bpp - 1; 403 mask = 512 / bpp - 1;
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
deleted file mode 100644
index 240cf962c999..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ /dev/null
@@ -1,394 +0,0 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/module.h>
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_i2c.h"
30#include "nouveau_hw.h"
31
32static void
33i2c_drive_scl(void *data, int state)
34{
35 struct nouveau_i2c_chan *port = data;
36 if (port->type == 0) {
37 u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
38 if (state) val |= 0x20;
39 else val &= 0xdf;
40 NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
41 } else
42 if (port->type == 4) {
43 nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
44 } else
45 if (port->type == 5) {
46 if (state) port->state |= 0x01;
47 else port->state &= 0xfe;
48 nv_wr32(port->dev, port->drive, 4 | port->state);
49 }
50}
51
52static void
53i2c_drive_sda(void *data, int state)
54{
55 struct nouveau_i2c_chan *port = data;
56 if (port->type == 0) {
57 u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
58 if (state) val |= 0x10;
59 else val &= 0xef;
60 NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
61 } else
62 if (port->type == 4) {
63 nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
64 } else
65 if (port->type == 5) {
66 if (state) port->state |= 0x02;
67 else port->state &= 0xfd;
68 nv_wr32(port->dev, port->drive, 4 | port->state);
69 }
70}
71
72static int
73i2c_sense_scl(void *data)
74{
75 struct nouveau_i2c_chan *port = data;
76 struct drm_nouveau_private *dev_priv = port->dev->dev_private;
77 if (port->type == 0) {
78 return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
79 } else
80 if (port->type == 4) {
81 return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
82 } else
83 if (port->type == 5) {
84 if (dev_priv->card_type < NV_D0)
85 return !!(nv_rd32(port->dev, port->sense) & 0x01);
86 else
87 return !!(nv_rd32(port->dev, port->sense) & 0x10);
88 }
89 return 0;
90}
91
92static int
93i2c_sense_sda(void *data)
94{
95 struct nouveau_i2c_chan *port = data;
96 struct drm_nouveau_private *dev_priv = port->dev->dev_private;
97 if (port->type == 0) {
98 return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
99 } else
100 if (port->type == 4) {
101 return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
102 } else
103 if (port->type == 5) {
104 if (dev_priv->card_type < NV_D0)
105 return !!(nv_rd32(port->dev, port->sense) & 0x02);
106 else
107 return !!(nv_rd32(port->dev, port->sense) & 0x20);
108 }
109 return 0;
110}
111
112static const uint32_t nv50_i2c_port[] = {
113 0x00e138, 0x00e150, 0x00e168, 0x00e180,
114 0x00e254, 0x00e274, 0x00e764, 0x00e780,
115 0x00e79c, 0x00e7b8
116};
117
118static u8 *
119i2c_table(struct drm_device *dev, u8 *version)
120{
121 u8 *dcb = dcb_table(dev), *i2c = NULL;
122 if (dcb) {
123 if (dcb[0] >= 0x15)
124 i2c = ROMPTR(dev, dcb[2]);
125 if (dcb[0] >= 0x30)
126 i2c = ROMPTR(dev, dcb[4]);
127 }
128
129 /* early revisions had no version number, use dcb version */
130 if (i2c) {
131 *version = dcb[0];
132 if (*version >= 0x30)
133 *version = i2c[0];
134 }
135
136 return i2c;
137}
138
139int
140nouveau_i2c_init(struct drm_device *dev)
141{
142 struct drm_nouveau_private *dev_priv = dev->dev_private;
143 struct nvbios *bios = &dev_priv->vbios;
144 struct nouveau_i2c_chan *port;
145 u8 version = 0x00, entries, recordlen;
146 u8 *i2c, *entry, legacy[2][4] = {};
147 int ret, i;
148
149 INIT_LIST_HEAD(&dev_priv->i2c_ports);
150
151 i2c = i2c_table(dev, &version);
152 if (!i2c) {
153 u8 *bmp = &bios->data[bios->offset];
154 if (bios->type != NVBIOS_BMP)
155 return -ENODEV;
156
157 legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
158 legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
159 legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
160 legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
161
162 /* BMP (from v4.0) has i2c info in the structure, it's in a
163 * fixed location on earlier VBIOS
164 */
165 if (bmp[5] < 4)
166 i2c = &bios->data[0x48];
167 else
168 i2c = &bmp[0x36];
169
170 if (i2c[4]) legacy[0][0] = i2c[4];
171 if (i2c[5]) legacy[0][1] = i2c[5];
172 if (i2c[6]) legacy[1][0] = i2c[6];
173 if (i2c[7]) legacy[1][1] = i2c[7];
174 }
175
176 if (version >= 0x30) {
177 entry = i2c[1] + i2c;
178 entries = i2c[2];
179 recordlen = i2c[3];
180 } else
181 if (version) {
182 entry = i2c;
183 entries = 16;
184 recordlen = 4;
185 } else {
186 entry = legacy[0];
187 entries = 2;
188 recordlen = 4;
189 }
190
191 for (i = 0; i < entries; i++, entry += recordlen) {
192 port = kzalloc(sizeof(*port), GFP_KERNEL);
193 if (port == NULL) {
194 nouveau_i2c_fini(dev);
195 return -ENOMEM;
196 }
197
198 port->type = entry[3];
199 if (version < 0x30) {
200 port->type &= 0x07;
201 if (port->type == 0x07)
202 port->type = 0xff;
203 }
204
205 if (port->type == 0xff) {
206 kfree(port);
207 continue;
208 }
209
210 switch (port->type) {
211 case 0: /* NV04:NV50 */
212 port->drive = entry[0];
213 port->sense = entry[1];
214 break;
215 case 4: /* NV4E */
216 port->drive = 0x600800 + entry[1];
217 port->sense = port->drive;
218 break;
219 case 5: /* NV50- */
220 port->drive = entry[0] & 0x0f;
221 if (dev_priv->card_type < NV_D0) {
222 if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
223 break;
224 port->drive = nv50_i2c_port[port->drive];
225 port->sense = port->drive;
226 } else {
227 port->drive = 0x00d014 + (port->drive * 0x20);
228 port->sense = port->drive;
229 }
230 break;
231 case 6: /* NV50- DP AUX */
232 port->drive = entry[0] & 0x0f;
233 port->sense = port->drive;
234 port->adapter.algo = &nouveau_dp_i2c_algo;
235 break;
236 default:
237 break;
238 }
239
240 if (!port->adapter.algo && !port->drive) {
241 NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
242 i, port->type, port->drive, port->sense);
243 kfree(port);
244 continue;
245 }
246
247 snprintf(port->adapter.name, sizeof(port->adapter.name),
248 "nouveau-%s-%d", pci_name(dev->pdev), i);
249 port->adapter.owner = THIS_MODULE;
250 port->adapter.dev.parent = &dev->pdev->dev;
251 port->dev = dev;
252 port->index = i;
253 port->dcb = ROM32(entry[0]);
254 i2c_set_adapdata(&port->adapter, i2c);
255
256 if (port->adapter.algo != &nouveau_dp_i2c_algo) {
257 port->adapter.algo_data = &port->bit;
258 port->bit.udelay = 10;
259 port->bit.timeout = usecs_to_jiffies(2200);
260 port->bit.data = port;
261 port->bit.setsda = i2c_drive_sda;
262 port->bit.setscl = i2c_drive_scl;
263 port->bit.getsda = i2c_sense_sda;
264 port->bit.getscl = i2c_sense_scl;
265
266 i2c_drive_scl(port, 0);
267 i2c_drive_sda(port, 1);
268 i2c_drive_scl(port, 1);
269
270 ret = i2c_bit_add_bus(&port->adapter);
271 } else {
272 port->adapter.algo = &nouveau_dp_i2c_algo;
273 ret = i2c_add_adapter(&port->adapter);
274 }
275
276 if (ret) {
277 NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
278 kfree(port);
279 continue;
280 }
281
282 list_add_tail(&port->head, &dev_priv->i2c_ports);
283 }
284
285 return 0;
286}
287
288void
289nouveau_i2c_fini(struct drm_device *dev)
290{
291 struct drm_nouveau_private *dev_priv = dev->dev_private;
292 struct nouveau_i2c_chan *port, *tmp;
293
294 list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
295 i2c_del_adapter(&port->adapter);
296 kfree(port);
297 }
298}
299
300struct nouveau_i2c_chan *
301nouveau_i2c_find(struct drm_device *dev, u8 index)
302{
303 struct drm_nouveau_private *dev_priv = dev->dev_private;
304 struct nouveau_i2c_chan *port;
305
306 if (index == NV_I2C_DEFAULT(0) ||
307 index == NV_I2C_DEFAULT(1)) {
308 u8 version, *i2c = i2c_table(dev, &version);
309 if (i2c && version >= 0x30) {
310 if (index == NV_I2C_DEFAULT(0))
311 index = (i2c[4] & 0x0f);
312 else
313 index = (i2c[4] & 0xf0) >> 4;
314 } else {
315 index = 2;
316 }
317 }
318
319 list_for_each_entry(port, &dev_priv->i2c_ports, head) {
320 if (port->index == index)
321 break;
322 }
323
324 if (&port->head == &dev_priv->i2c_ports)
325 return NULL;
326
327 if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
328 u32 reg = 0x00e500, val;
329 if (port->type == 6) {
330 reg += port->drive * 0x50;
331 val = 0x2002;
332 } else {
333 reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
334 val = 0xe001;
335 }
336
337 /* nfi, but neither auxch or i2c work if it's 1 */
338 nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000);
339 /* nfi, but switches auxch vs normal i2c */
340 nv_mask(dev, reg + 0x00, 0x0000f003, val);
341 }
342
343 return port;
344}
345
346bool
347nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
348{
349 uint8_t buf[] = { 0 };
350 struct i2c_msg msgs[] = {
351 {
352 .addr = addr,
353 .flags = 0,
354 .len = 1,
355 .buf = buf,
356 },
357 {
358 .addr = addr,
359 .flags = I2C_M_RD,
360 .len = 1,
361 .buf = buf,
362 }
363 };
364
365 return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
366}
367
368int
369nouveau_i2c_identify(struct drm_device *dev, const char *what,
370 struct i2c_board_info *info,
371 bool (*match)(struct nouveau_i2c_chan *,
372 struct i2c_board_info *),
373 int index)
374{
375 struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
376 int i;
377
378 if (!i2c) {
379 NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
380 return -ENODEV;
381 }
382
383 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
384 for (i = 0; info[i].addr; i++) {
385 if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
386 (!match || match(i2c, &info[i]))) {
387 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
388 return i;
389 }
390 }
391
392 NV_DEBUG(dev, "No devices found.\n");
393 return -ENODEV;
394}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index 475ba810bba3..6ac560ee478c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -36,7 +36,7 @@
36#include "drmP.h" 36#include "drmP.h"
37#include "drm.h" 37#include "drm.h"
38 38
39#include "nouveau_drv.h" 39#include "nouveau_ioctl.h"
40 40
41/** 41/**
42 * Called whenever a 32-bit process running under a 64-bit kernel 42 * Called whenever a 32-bit process running under a 64-bit kernel
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
new file mode 100644
index 000000000000..ef2b2906d9e6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -0,0 +1,6 @@
1#ifndef __NOUVEAU_IOCTL_H__
2#define __NOUVEAU_IOCTL_H__
3
4long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg);
5
6#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index b2c2937531a8..9ca8afdb5549 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -1,147 +1,86 @@
1/* 1/*
2 * Copyright (C) 2006 Ben Skeggs. 2 * Copyright 2012 Red Hat Inc.
3 * 3 *
4 * All Rights Reserved. 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
5 * 10 *
6 * Permission is hereby granted, free of charge, to any person obtaining 11 * The above copyright notice and this permission notice shall be included in
7 * a copy of this software and associated documentation files (the 12 * all copies or substantial portions of the Software.
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 * 13 *
14 * The above copyright notice and this permission notice (including the 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * next paragraph) shall be included in all copies or substantial 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * portions of the Software. 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * 21 *
22 * Authors: Ben Skeggs
26 */ 23 */
27 24
28/* 25#include <subdev/mc.h>
29 * Authors:
30 * Ben Skeggs <darktama@iinet.net.au>
31 */
32 26
33#include "drmP.h"
34#include "drm.h"
35#include "nouveau_drm.h" 27#include "nouveau_drm.h"
36#include "nouveau_drv.h" 28#include "nouveau_irq.h"
37#include "nouveau_reg.h" 29#include "nv50_display.h"
38#include "nouveau_ramht.h"
39#include "nouveau_util.h"
40 30
41void 31void
42nouveau_irq_preinstall(struct drm_device *dev) 32nouveau_irq_preinstall(struct drm_device *dev)
43{ 33{
44 /* Master disable */ 34 nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
45 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
46} 35}
47 36
48int 37int
49nouveau_irq_postinstall(struct drm_device *dev) 38nouveau_irq_postinstall(struct drm_device *dev)
50{ 39{
51 struct drm_nouveau_private *dev_priv = dev->dev_private; 40 nv_wr32(nouveau_dev(dev), 0x000140, 0x00000001);
52
53 /* Master enable */
54 nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
55 if (dev_priv->msi_enabled)
56 nv_wr08(dev, 0x00088068, 0xff);
57
58 return 0; 41 return 0;
59} 42}
60 43
61void 44void
62nouveau_irq_uninstall(struct drm_device *dev) 45nouveau_irq_uninstall(struct drm_device *dev)
63{ 46{
64 /* Master disable */ 47 nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
65 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
66} 48}
67 49
68irqreturn_t 50irqreturn_t
69nouveau_irq_handler(DRM_IRQ_ARGS) 51nouveau_irq_handler(DRM_IRQ_ARGS)
70{ 52{
71 struct drm_device *dev = (struct drm_device *)arg; 53 struct drm_device *dev = arg;
72 struct drm_nouveau_private *dev_priv = dev->dev_private; 54 struct nouveau_device *device = nouveau_dev(dev);
73 unsigned long flags; 55 struct nouveau_mc *pmc = nouveau_mc(device);
74 u32 stat; 56 u32 stat;
75 int i;
76 57
77 stat = nv_rd32(dev, NV03_PMC_INTR_0); 58 stat = nv_rd32(device, 0x000100);
78 if (stat == 0 || stat == ~0) 59 if (stat == 0 || stat == ~0)
79 return IRQ_NONE; 60 return IRQ_NONE;
80 61
81 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 62 nv_subdev(pmc)->intr(nv_subdev(pmc));
82 for (i = 0; i < 32 && stat; i++) {
83 if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
84 continue;
85 63
86 dev_priv->irq_handler[i](dev); 64 if (device->card_type >= NV_D0) {
87 stat &= ~(1 << i); 65 if (nv_rd32(device, 0x000100) & 0x04000000)
66 nvd0_display_intr(dev);
67 } else
68 if (device->card_type >= NV_50) {
69 if (nv_rd32(device, 0x000100) & 0x04000000)
70 nv50_display_intr(dev);
88 } 71 }
89 72
90 if (dev_priv->msi_enabled)
91 nv_wr08(dev, 0x00088068, 0xff);
92 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
93
94 if (stat && nouveau_ratelimit())
95 NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
96 return IRQ_HANDLED; 73 return IRQ_HANDLED;
97} 74}
98 75
99int 76int
100nouveau_irq_init(struct drm_device *dev) 77nouveau_irq_init(struct drm_device *dev)
101{ 78{
102 struct drm_nouveau_private *dev_priv = dev->dev_private;
103 int ret;
104
105 if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
106 ret = pci_enable_msi(dev->pdev);
107 if (ret == 0) {
108 NV_INFO(dev, "enabled MSI\n");
109 dev_priv->msi_enabled = true;
110 }
111 }
112
113 return drm_irq_install(dev); 79 return drm_irq_install(dev);
114} 80}
115 81
116void 82void
117nouveau_irq_fini(struct drm_device *dev) 83nouveau_irq_fini(struct drm_device *dev)
118{ 84{
119 struct drm_nouveau_private *dev_priv = dev->dev_private;
120
121 drm_irq_uninstall(dev); 85 drm_irq_uninstall(dev);
122 if (dev_priv->msi_enabled)
123 pci_disable_msi(dev->pdev);
124}
125
126void
127nouveau_irq_register(struct drm_device *dev, int status_bit,
128 void (*handler)(struct drm_device *))
129{
130 struct drm_nouveau_private *dev_priv = dev->dev_private;
131 unsigned long flags;
132
133 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
134 dev_priv->irq_handler[status_bit] = handler;
135 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
136}
137
138void
139nouveau_irq_unregister(struct drm_device *dev, int status_bit)
140{
141 struct drm_nouveau_private *dev_priv = dev->dev_private;
142 unsigned long flags;
143
144 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
145 dev_priv->irq_handler[status_bit] = NULL;
146 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
147} 86}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.h b/drivers/gpu/drm/nouveau/nouveau_irq.h
new file mode 100644
index 000000000000..06714ad857bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.h
@@ -0,0 +1,11 @@
1#ifndef __NOUVEAU_IRQ_H__
2#define __NOUVEAU_IRQ_H__
3
4extern int nouveau_irq_init(struct drm_device *);
5extern void nouveau_irq_fini(struct drm_device *);
6extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
7extern void nouveau_irq_preinstall(struct drm_device *);
8extern int nouveau_irq_postinstall(struct drm_device *);
9extern void nouveau_irq_uninstall(struct drm_device *);
10
11#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5b498ea32e14..7e0ff10a2759 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -30,448 +30,10 @@
30 * Roy Spliet <r.spliet@student.tudelft.nl> 30 * Roy Spliet <r.spliet@student.tudelft.nl>
31 */ 31 */
32 32
33 33#include "nouveau_drm.h"
34#include "drmP.h"
35#include "drm.h"
36#include "drm_sarea.h"
37
38#include "nouveau_drv.h"
39#include "nouveau_pm.h" 34#include "nouveau_pm.h"
40#include "nouveau_mm.h"
41#include "nouveau_vm.h"
42#include "nouveau_fifo.h"
43#include "nouveau_fence.h"
44
45/*
46 * NV10-NV40 tiling helpers
47 */
48
49static void
50nv10_mem_update_tile_region(struct drm_device *dev,
51 struct nouveau_tile_reg *tile, uint32_t addr,
52 uint32_t size, uint32_t pitch, uint32_t flags)
53{
54 struct drm_nouveau_private *dev_priv = dev->dev_private;
55 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
56 int i = tile - dev_priv->tile.reg, j;
57 unsigned long save;
58
59 nouveau_fence_unref(&tile->fence);
60
61 if (tile->pitch)
62 pfb->free_tile_region(dev, i);
63
64 if (pitch)
65 pfb->init_tile_region(dev, i, addr, size, pitch, flags);
66
67 spin_lock_irqsave(&dev_priv->context_switch_lock, save);
68 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
69 nv04_fifo_cache_pull(dev, false);
70
71 nouveau_wait_for_idle(dev);
72
73 pfb->set_tile_region(dev, i);
74 for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
75 if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
76 dev_priv->eng[j]->set_tile_region(dev, i);
77 }
78
79 nv04_fifo_cache_pull(dev, true);
80 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
81 spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
82}
83
84static struct nouveau_tile_reg *
85nv10_mem_get_tile_region(struct drm_device *dev, int i)
86{
87 struct drm_nouveau_private *dev_priv = dev->dev_private;
88 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
89
90 spin_lock(&dev_priv->tile.lock);
91
92 if (!tile->used &&
93 (!tile->fence || nouveau_fence_done(tile->fence)))
94 tile->used = true;
95 else
96 tile = NULL;
97
98 spin_unlock(&dev_priv->tile.lock);
99 return tile;
100}
101
102void
103nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
104 struct nouveau_fence *fence)
105{
106 struct drm_nouveau_private *dev_priv = dev->dev_private;
107
108 if (tile) {
109 spin_lock(&dev_priv->tile.lock);
110 if (fence) {
111 /* Mark it as pending. */
112 tile->fence = fence;
113 nouveau_fence_ref(fence);
114 }
115
116 tile->used = false;
117 spin_unlock(&dev_priv->tile.lock);
118 }
119}
120
121struct nouveau_tile_reg *
122nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
123 uint32_t pitch, uint32_t flags)
124{
125 struct drm_nouveau_private *dev_priv = dev->dev_private;
126 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
127 struct nouveau_tile_reg *tile, *found = NULL;
128 int i;
129
130 for (i = 0; i < pfb->num_tiles; i++) {
131 tile = nv10_mem_get_tile_region(dev, i);
132
133 if (pitch && !found) {
134 found = tile;
135 continue;
136
137 } else if (tile && tile->pitch) {
138 /* Kill an unused tile region. */
139 nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
140 }
141
142 nv10_mem_put_tile_region(dev, tile, NULL);
143 }
144
145 if (found)
146 nv10_mem_update_tile_region(dev, found, addr, size,
147 pitch, flags);
148 return found;
149}
150
151/*
152 * Cleanup everything
153 */
154void
155nouveau_mem_vram_fini(struct drm_device *dev)
156{
157 struct drm_nouveau_private *dev_priv = dev->dev_private;
158
159 ttm_bo_device_release(&dev_priv->ttm.bdev);
160
161 nouveau_ttm_global_release(dev_priv);
162
163 if (dev_priv->fb_mtrr >= 0) {
164 drm_mtrr_del(dev_priv->fb_mtrr,
165 pci_resource_start(dev->pdev, 1),
166 pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
167 dev_priv->fb_mtrr = -1;
168 }
169}
170
171void
172nouveau_mem_gart_fini(struct drm_device *dev)
173{
174 nouveau_sgdma_takedown(dev);
175
176 if (drm_core_has_AGP(dev) && dev->agp) {
177 struct drm_agp_mem *entry, *tempe;
178
179 /* Remove AGP resources, but leave dev->agp
180 intact until drv_cleanup is called. */
181 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
182 if (entry->bound)
183 drm_unbind_agp(entry->memory);
184 drm_free_agp(entry->memory, entry->pages);
185 kfree(entry);
186 }
187 INIT_LIST_HEAD(&dev->agp->memory);
188 35
189 if (dev->agp->acquired) 36#include <subdev/fb.h>
190 drm_agp_release(dev);
191
192 dev->agp->acquired = 0;
193 dev->agp->enabled = 0;
194 }
195}
196
197bool
198nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
199{
200 if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
201 return true;
202
203 return false;
204}
205
206#if __OS_HAS_AGP
207static unsigned long
208get_agp_mode(struct drm_device *dev, unsigned long mode)
209{
210 struct drm_nouveau_private *dev_priv = dev->dev_private;
211
212 /*
213 * FW seems to be broken on nv18, it makes the card lock up
214 * randomly.
215 */
216 if (dev_priv->chipset == 0x18)
217 mode &= ~PCI_AGP_COMMAND_FW;
218
219 /*
220 * AGP mode set in the command line.
221 */
222 if (nouveau_agpmode > 0) {
223 bool agpv3 = mode & 0x8;
224 int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
225
226 mode = (mode & ~0x7) | (rate & 0x7);
227 }
228
229 return mode;
230}
231#endif
232
233int
234nouveau_mem_reset_agp(struct drm_device *dev)
235{
236#if __OS_HAS_AGP
237 uint32_t saved_pci_nv_1, pmc_enable;
238 int ret;
239
240 /* First of all, disable fast writes, otherwise if it's
241 * already enabled in the AGP bridge and we disable the card's
242 * AGP controller we might be locking ourselves out of it. */
243 if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
244 dev->agp->mode) & PCI_AGP_COMMAND_FW) {
245 struct drm_agp_info info;
246 struct drm_agp_mode mode;
247
248 ret = drm_agp_info(dev, &info);
249 if (ret)
250 return ret;
251
252 mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
253 ret = drm_agp_enable(dev, mode);
254 if (ret)
255 return ret;
256 }
257
258 saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
259
260 /* clear busmaster bit */
261 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
262 /* disable AGP */
263 nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);
264
265 /* power cycle pgraph, if enabled */
266 pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
267 if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
268 nv_wr32(dev, NV03_PMC_ENABLE,
269 pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
270 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
271 NV_PMC_ENABLE_PGRAPH);
272 }
273
274 /* and restore (gives effect of resetting AGP) */
275 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
276#endif
277
278 return 0;
279}
280
281int
282nouveau_mem_init_agp(struct drm_device *dev)
283{
284#if __OS_HAS_AGP
285 struct drm_nouveau_private *dev_priv = dev->dev_private;
286 struct drm_agp_info info;
287 struct drm_agp_mode mode;
288 int ret;
289
290 if (!dev->agp->acquired) {
291 ret = drm_agp_acquire(dev);
292 if (ret) {
293 NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
294 return ret;
295 }
296 }
297
298 nouveau_mem_reset_agp(dev);
299
300 ret = drm_agp_info(dev, &info);
301 if (ret) {
302 NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
303 return ret;
304 }
305
306 /* see agp.h for the AGPSTAT_* modes available */
307 mode.mode = get_agp_mode(dev, info.mode);
308 ret = drm_agp_enable(dev, mode);
309 if (ret) {
310 NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
311 return ret;
312 }
313
314 dev_priv->gart_info.type = NOUVEAU_GART_AGP;
315 dev_priv->gart_info.aper_base = info.aperture_base;
316 dev_priv->gart_info.aper_size = info.aperture_size;
317#endif
318 return 0;
319}
320
321static const struct vram_types {
322 int value;
323 const char *name;
324} vram_type_map[] = {
325 { NV_MEM_TYPE_STOLEN , "stolen system memory" },
326 { NV_MEM_TYPE_SGRAM , "SGRAM" },
327 { NV_MEM_TYPE_SDRAM , "SDRAM" },
328 { NV_MEM_TYPE_DDR1 , "DDR1" },
329 { NV_MEM_TYPE_DDR2 , "DDR2" },
330 { NV_MEM_TYPE_DDR3 , "DDR3" },
331 { NV_MEM_TYPE_GDDR2 , "GDDR2" },
332 { NV_MEM_TYPE_GDDR3 , "GDDR3" },
333 { NV_MEM_TYPE_GDDR4 , "GDDR4" },
334 { NV_MEM_TYPE_GDDR5 , "GDDR5" },
335 { NV_MEM_TYPE_UNKNOWN, "unknown type" }
336};
337
338int
339nouveau_mem_vram_init(struct drm_device *dev)
340{
341 struct drm_nouveau_private *dev_priv = dev->dev_private;
342 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
343 const struct vram_types *vram_type;
344 int ret, dma_bits;
345
346 dma_bits = 32;
347 if (dev_priv->card_type >= NV_50) {
348 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
349 dma_bits = 40;
350 } else
351 if (0 && pci_is_pcie(dev->pdev) &&
352 dev_priv->chipset > 0x40 &&
353 dev_priv->chipset != 0x45) {
354 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
355 dma_bits = 39;
356 }
357
358 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
359 if (ret)
360 return ret;
361 ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
362 if (ret) {
363 /* Reset to default value. */
364 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
365 }
366
367
368 ret = nouveau_ttm_global_init(dev_priv);
369 if (ret)
370 return ret;
371
372 ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
373 dev_priv->ttm.bo_global_ref.ref.object,
374 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
375 dma_bits <= 32 ? true : false);
376 if (ret) {
377 NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
378 return ret;
379 }
380
381 vram_type = vram_type_map;
382 while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
383 if (nouveau_vram_type) {
384 if (!strcasecmp(nouveau_vram_type, vram_type->name))
385 break;
386 dev_priv->vram_type = vram_type->value;
387 } else {
388 if (vram_type->value == dev_priv->vram_type)
389 break;
390 }
391 vram_type++;
392 }
393
394 NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
395 (int)(dev_priv->vram_size >> 20), vram_type->name);
396 if (dev_priv->vram_sys_base) {
397 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
398 dev_priv->vram_sys_base);
399 }
400
401 dev_priv->fb_available_size = dev_priv->vram_size;
402 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
403 if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
404 dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
405 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
406
407 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
408 dev_priv->fb_aper_free = dev_priv->fb_available_size;
409
410 /* mappable vram */
411 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
412 dev_priv->fb_available_size >> PAGE_SHIFT);
413 if (ret) {
414 NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
415 return ret;
416 }
417
418 if (dev_priv->card_type < NV_50) {
419 ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
420 0, 0, NULL, &dev_priv->vga_ram);
421 if (ret == 0)
422 ret = nouveau_bo_pin(dev_priv->vga_ram,
423 TTM_PL_FLAG_VRAM);
424
425 if (ret) {
426 NV_WARN(dev, "failed to reserve VGA memory\n");
427 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
428 }
429 }
430
431 dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
432 pci_resource_len(dev->pdev, 1),
433 DRM_MTRR_WC);
434 return 0;
435}
436
437int
438nouveau_mem_gart_init(struct drm_device *dev)
439{
440 struct drm_nouveau_private *dev_priv = dev->dev_private;
441 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
442 int ret;
443
444 dev_priv->gart_info.type = NOUVEAU_GART_NONE;
445
446#if !defined(__powerpc__) && !defined(__ia64__)
447 if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
448 ret = nouveau_mem_init_agp(dev);
449 if (ret)
450 NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
451 }
452#endif
453
454 if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
455 ret = nouveau_sgdma_init(dev);
456 if (ret) {
457 NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
458 return ret;
459 }
460 }
461
462 NV_INFO(dev, "%d MiB GART (aperture)\n",
463 (int)(dev_priv->gart_info.aper_size >> 20));
464 dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
465
466 ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
467 dev_priv->gart_info.aper_size >> PAGE_SHIFT);
468 if (ret) {
469 NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
470 return ret;
471 }
472
473 return 0;
474}
475 37
476static int 38static int
477nv40_mem_timing_calc(struct drm_device *dev, u32 freq, 39nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
@@ -479,6 +41,8 @@ nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
479 struct nouveau_pm_memtiming *boot, 41 struct nouveau_pm_memtiming *boot,
480 struct nouveau_pm_memtiming *t) 42 struct nouveau_pm_memtiming *t)
481{ 43{
44 struct nouveau_drm *drm = nouveau_drm(dev);
45
482 t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC); 46 t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
483 47
484 /* XXX: I don't trust the -1's and +1's... they must come 48 /* XXX: I don't trust the -1's and +1's... they must come
@@ -494,7 +58,7 @@ nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
494 e->tRCDWR << 8 | 58 e->tRCDWR << 8 |
495 e->tRCDRD); 59 e->tRCDRD);
496 60
497 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id, 61 NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
498 t->reg[0], t->reg[1], t->reg[2]); 62 t->reg[0], t->reg[1], t->reg[2]);
499 return 0; 63 return 0;
500} 64}
@@ -505,7 +69,9 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
505 struct nouveau_pm_memtiming *boot, 69 struct nouveau_pm_memtiming *boot,
506 struct nouveau_pm_memtiming *t) 70 struct nouveau_pm_memtiming *t)
507{ 71{
508 struct drm_nouveau_private *dev_priv = dev->dev_private; 72 struct nouveau_device *device = nouveau_dev(dev);
73 struct nouveau_fb *pfb = nouveau_fb(device);
74 struct nouveau_drm *drm = nouveau_drm(dev);
509 struct bit_entry P; 75 struct bit_entry P;
510 uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3; 76 uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
511 77
@@ -559,7 +125,7 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
559 t->reg[7] = 0x4000202 | (e->tCL - 1) << 16; 125 t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
560 126
561 /* XXX: P.version == 1 only has DDR2 and GDDR3? */ 127 /* XXX: P.version == 1 only has DDR2 and GDDR3? */
562 if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) { 128 if (pfb->ram.type == NV_MEM_TYPE_DDR2) {
563 t->reg[5] |= (e->tCL + 3) << 8; 129 t->reg[5] |= (e->tCL + 3) << 8;
564 t->reg[6] |= (t->tCWL - 2) << 8; 130 t->reg[6] |= (t->tCWL - 2) << 8;
565 t->reg[8] |= (e->tCL - 4); 131 t->reg[8] |= (e->tCL - 4);
@@ -592,11 +158,11 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
592 0x202; 158 0x202;
593 } 159 }
594 160
595 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id, 161 NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
596 t->reg[0], t->reg[1], t->reg[2], t->reg[3]); 162 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
597 NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", 163 NV_DEBUG(drm, " 230: %08x %08x %08x %08x\n",
598 t->reg[4], t->reg[5], t->reg[6], t->reg[7]); 164 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
599 NV_DEBUG(dev, " 240: %08x\n", t->reg[8]); 165 NV_DEBUG(drm, " 240: %08x\n", t->reg[8]);
600 return 0; 166 return 0;
601} 167}
602 168
@@ -606,6 +172,8 @@ nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
606 struct nouveau_pm_memtiming *boot, 172 struct nouveau_pm_memtiming *boot,
607 struct nouveau_pm_memtiming *t) 173 struct nouveau_pm_memtiming *t)
608{ 174{
175 struct nouveau_drm *drm = nouveau_drm(dev);
176
609 if (e->tCWL > 0) 177 if (e->tCWL > 0)
610 t->tCWL = e->tCWL; 178 t->tCWL = e->tCWL;
611 179
@@ -628,9 +196,9 @@ nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
628 t->reg[4] = (boot->reg[4] & 0xfff00fff) | 196 t->reg[4] = (boot->reg[4] & 0xfff00fff) |
629 (e->tRRD&0x1f) << 15; 197 (e->tRRD&0x1f) << 15;
630 198
631 NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id, 199 NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
632 t->reg[0], t->reg[1], t->reg[2], t->reg[3]); 200 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
633 NV_DEBUG(dev, " 2a0: %08x\n", t->reg[4]); 201 NV_DEBUG(drm, " 2a0: %08x\n", t->reg[4]);
634 return 0; 202 return 0;
635} 203}
636 204
@@ -644,6 +212,8 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
644 struct nouveau_pm_memtiming *boot, 212 struct nouveau_pm_memtiming *boot,
645 struct nouveau_pm_memtiming *t) 213 struct nouveau_pm_memtiming *t)
646{ 214{
215 struct nouveau_drm *drm = nouveau_drm(dev);
216
647 t->drive_strength = 0; 217 t->drive_strength = 0;
648 if (len < 15) { 218 if (len < 15) {
649 t->odt = boot->odt; 219 t->odt = boot->odt;
@@ -652,17 +222,17 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
652 } 222 }
653 223
654 if (e->tCL >= NV_MEM_CL_DDR2_MAX) { 224 if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
655 NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); 225 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
656 return -ERANGE; 226 return -ERANGE;
657 } 227 }
658 228
659 if (e->tWR >= NV_MEM_WR_DDR2_MAX) { 229 if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
660 NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); 230 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
661 return -ERANGE; 231 return -ERANGE;
662 } 232 }
663 233
664 if (t->odt > 3) { 234 if (t->odt > 3) {
665 NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x", 235 NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
666 t->id, t->odt); 236 t->id, t->odt);
667 t->odt = 0; 237 t->odt = 0;
668 } 238 }
@@ -674,11 +244,11 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
674 (t->odt & 0x1) << 2 | 244 (t->odt & 0x1) << 2 |
675 (t->odt & 0x2) << 5; 245 (t->odt & 0x2) << 5;
676 246
677 NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]); 247 NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
678 return 0; 248 return 0;
679} 249}
680 250
681uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = { 251static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
682 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0}; 252 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
683 253
684static int 254static int
@@ -687,6 +257,7 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
687 struct nouveau_pm_memtiming *boot, 257 struct nouveau_pm_memtiming *boot,
688 struct nouveau_pm_memtiming *t) 258 struct nouveau_pm_memtiming *t)
689{ 259{
260 struct nouveau_drm *drm = nouveau_drm(dev);
690 u8 cl = e->tCL - 4; 261 u8 cl = e->tCL - 4;
691 262
692 t->drive_strength = 0; 263 t->drive_strength = 0;
@@ -697,17 +268,17 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
697 } 268 }
698 269
699 if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) { 270 if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
700 NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); 271 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
701 return -ERANGE; 272 return -ERANGE;
702 } 273 }
703 274
704 if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) { 275 if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
705 NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); 276 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
706 return -ERANGE; 277 return -ERANGE;
707 } 278 }
708 279
709 if (e->tCWL < 5) { 280 if (e->tCWL < 5) {
710 NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL); 281 NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
711 return -ERANGE; 282 return -ERANGE;
712 } 283 }
713 284
@@ -722,13 +293,13 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
722 (t->odt & 0x4) << 7; 293 (t->odt & 0x4) << 7;
723 t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3; 294 t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
724 295
725 NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]); 296 NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
726 return 0; 297 return 0;
727} 298}
728 299
729uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = { 300static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
730 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11}; 301 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
731uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = { 302static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
732 0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3}; 303 0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
733 304
734static int 305static int
@@ -737,6 +308,8 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
737 struct nouveau_pm_memtiming *boot, 308 struct nouveau_pm_memtiming *boot,
738 struct nouveau_pm_memtiming *t) 309 struct nouveau_pm_memtiming *t)
739{ 310{
311 struct nouveau_drm *drm = nouveau_drm(dev);
312
740 if (len < 15) { 313 if (len < 15) {
741 t->drive_strength = boot->drive_strength; 314 t->drive_strength = boot->drive_strength;
742 t->odt = boot->odt; 315 t->odt = boot->odt;
@@ -746,17 +319,17 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
746 } 319 }
747 320
748 if (e->tCL >= NV_MEM_CL_GDDR3_MAX) { 321 if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
749 NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); 322 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
750 return -ERANGE; 323 return -ERANGE;
751 } 324 }
752 325
753 if (e->tWR >= NV_MEM_WR_GDDR3_MAX) { 326 if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
754 NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); 327 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
755 return -ERANGE; 328 return -ERANGE;
756 } 329 }
757 330
758 if (t->odt > 3) { 331 if (t->odt > 3) {
759 NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x", 332 NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
760 t->id, t->odt); 333 t->id, t->odt);
761 t->odt = 0; 334 t->odt = 0;
762 } 335 }
@@ -770,7 +343,7 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
770 (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4; 343 (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
771 t->mr[2] = boot->mr[2]; 344 t->mr[2] = boot->mr[2];
772 345
773 NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id, 346 NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
774 t->mr[0], t->mr[1], t->mr[2]); 347 t->mr[0], t->mr[1], t->mr[2]);
775 return 0; 348 return 0;
776} 349}
@@ -781,6 +354,8 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
781 struct nouveau_pm_memtiming *boot, 354 struct nouveau_pm_memtiming *boot,
782 struct nouveau_pm_memtiming *t) 355 struct nouveau_pm_memtiming *t)
783{ 356{
357 struct nouveau_drm *drm = nouveau_drm(dev);
358
784 if (len < 15) { 359 if (len < 15) {
785 t->drive_strength = boot->drive_strength; 360 t->drive_strength = boot->drive_strength;
786 t->odt = boot->odt; 361 t->odt = boot->odt;
@@ -790,17 +365,17 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
790 } 365 }
791 366
792 if (e->tCL >= NV_MEM_CL_GDDR5_MAX) { 367 if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
793 NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); 368 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
794 return -ERANGE; 369 return -ERANGE;
795 } 370 }
796 371
797 if (e->tWR >= NV_MEM_WR_GDDR5_MAX) { 372 if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
798 NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); 373 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
799 return -ERANGE; 374 return -ERANGE;
800 } 375 }
801 376
802 if (t->odt > 3) { 377 if (t->odt > 3) {
803 NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x", 378 NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
804 t->id, t->odt); 379 t->id, t->odt);
805 t->odt = 0; 380 t->odt = 0;
806 } 381 }
@@ -812,7 +387,7 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
812 t->drive_strength | 387 t->drive_strength |
813 (t->odt << 2); 388 (t->odt << 2);
814 389
815 NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]); 390 NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
816 return 0; 391 return 0;
817} 392}
818 393
@@ -820,8 +395,9 @@ int
820nouveau_mem_timing_calc(struct drm_device *dev, u32 freq, 395nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
821 struct nouveau_pm_memtiming *t) 396 struct nouveau_pm_memtiming *t)
822{ 397{
823 struct drm_nouveau_private *dev_priv = dev->dev_private; 398 struct nouveau_device *device = nouveau_dev(dev);
824 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 399 struct nouveau_fb *pfb = nouveau_fb(device);
400 struct nouveau_pm *pm = nouveau_pm(dev);
825 struct nouveau_pm_memtiming *boot = &pm->boot.timing; 401 struct nouveau_pm_memtiming *boot = &pm->boot.timing;
826 struct nouveau_pm_tbl_entry *e; 402 struct nouveau_pm_tbl_entry *e;
827 u8 ver, len, *ptr, *ramcfg; 403 u8 ver, len, *ptr, *ramcfg;
@@ -836,7 +412,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
836 412
837 t->tCWL = boot->tCWL; 413 t->tCWL = boot->tCWL;
838 414
839 switch (dev_priv->card_type) { 415 switch (device->card_type) {
840 case NV_40: 416 case NV_40:
841 ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t); 417 ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
842 break; 418 break;
@@ -852,7 +428,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
852 break; 428 break;
853 } 429 }
854 430
855 switch (dev_priv->vram_type * !ret) { 431 switch (pfb->ram.type * !ret) {
856 case NV_MEM_TYPE_GDDR3: 432 case NV_MEM_TYPE_GDDR3:
857 ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t); 433 ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
858 break; 434 break;
@@ -879,7 +455,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
879 else 455 else
880 dll_off = !!(ramcfg[2] & 0x40); 456 dll_off = !!(ramcfg[2] & 0x40);
881 457
882 switch (dev_priv->vram_type) { 458 switch (pfb->ram.type) {
883 case NV_MEM_TYPE_GDDR3: 459 case NV_MEM_TYPE_GDDR3:
884 t->mr[1] &= ~0x00000040; 460 t->mr[1] &= ~0x00000040;
885 t->mr[1] |= 0x00000040 * dll_off; 461 t->mr[1] |= 0x00000040 * dll_off;
@@ -897,11 +473,12 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
897void 473void
898nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t) 474nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
899{ 475{
900 struct drm_nouveau_private *dev_priv = dev->dev_private; 476 struct nouveau_device *device = nouveau_dev(dev);
477 struct nouveau_fb *pfb = nouveau_fb(device);
901 u32 timing_base, timing_regs, mr_base; 478 u32 timing_base, timing_regs, mr_base;
902 int i; 479 int i;
903 480
904 if (dev_priv->card_type >= 0xC0) { 481 if (device->card_type >= 0xC0) {
905 timing_base = 0x10f290; 482 timing_base = 0x10f290;
906 mr_base = 0x10f300; 483 mr_base = 0x10f300;
907 } else { 484 } else {
@@ -911,7 +488,7 @@ nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
911 488
912 t->id = -1; 489 t->id = -1;
913 490
914 switch (dev_priv->card_type) { 491 switch (device->card_type) {
915 case NV_50: 492 case NV_50:
916 timing_regs = 9; 493 timing_regs = 9;
917 break; 494 break;
@@ -928,24 +505,24 @@ nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
928 return; 505 return;
929 } 506 }
930 for(i = 0; i < timing_regs; i++) 507 for(i = 0; i < timing_regs; i++)
931 t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i)); 508 t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));
932 509
933 t->tCWL = 0; 510 t->tCWL = 0;
934 if (dev_priv->card_type < NV_C0) { 511 if (device->card_type < NV_C0) {
935 t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1; 512 t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
936 } else if (dev_priv->card_type <= NV_D0) { 513 } else if (device->card_type <= NV_D0) {
937 t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7); 514 t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
938 } 515 }
939 516
940 t->mr[0] = nv_rd32(dev, mr_base); 517 t->mr[0] = nv_rd32(device, mr_base);
941 t->mr[1] = nv_rd32(dev, mr_base + 0x04); 518 t->mr[1] = nv_rd32(device, mr_base + 0x04);
942 t->mr[2] = nv_rd32(dev, mr_base + 0x20); 519 t->mr[2] = nv_rd32(device, mr_base + 0x20);
943 t->mr[3] = nv_rd32(dev, mr_base + 0x24); 520 t->mr[3] = nv_rd32(device, mr_base + 0x24);
944 521
945 t->odt = 0; 522 t->odt = 0;
946 t->drive_strength = 0; 523 t->drive_strength = 0;
947 524
948 switch (dev_priv->vram_type) { 525 switch (pfb->ram.type) {
949 case NV_MEM_TYPE_DDR3: 526 case NV_MEM_TYPE_DDR3:
950 t->odt |= (t->mr[1] & 0x200) >> 7; 527 t->odt |= (t->mr[1] & 0x200) >> 7;
951 case NV_MEM_TYPE_DDR2: 528 case NV_MEM_TYPE_DDR2:
@@ -966,13 +543,15 @@ int
966nouveau_mem_exec(struct nouveau_mem_exec_func *exec, 543nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
967 struct nouveau_pm_level *perflvl) 544 struct nouveau_pm_level *perflvl)
968{ 545{
969 struct drm_nouveau_private *dev_priv = exec->dev->dev_private; 546 struct nouveau_drm *drm = nouveau_drm(exec->dev);
547 struct nouveau_device *device = nouveau_dev(exec->dev);
548 struct nouveau_fb *pfb = nouveau_fb(device);
970 struct nouveau_pm_memtiming *info = &perflvl->timing; 549 struct nouveau_pm_memtiming *info = &perflvl->timing;
971 u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0; 550 u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
972 u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] }; 551 u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
973 u32 mr1_dlloff; 552 u32 mr1_dlloff;
974 553
975 switch (dev_priv->vram_type) { 554 switch (pfb->ram.type) {
976 case NV_MEM_TYPE_DDR2: 555 case NV_MEM_TYPE_DDR2:
977 tDLLK = 2000; 556 tDLLK = 2000;
978 mr1_dlloff = 0x00000001; 557 mr1_dlloff = 0x00000001;
@@ -988,12 +567,12 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
988 mr1_dlloff = 0x00000040; 567 mr1_dlloff = 0x00000040;
989 break; 568 break;
990 default: 569 default:
991 NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n"); 570 NV_ERROR(drm, "cannot reclock unsupported memtype\n");
992 return -ENODEV; 571 return -ENODEV;
993 } 572 }
994 573
995 /* fetch current MRs */ 574 /* fetch current MRs */
996 switch (dev_priv->vram_type) { 575 switch (pfb->ram.type) {
997 case NV_MEM_TYPE_GDDR3: 576 case NV_MEM_TYPE_GDDR3:
998 case NV_MEM_TYPE_DDR3: 577 case NV_MEM_TYPE_DDR3:
999 mr[2] = exec->mrg(exec, 2); 578 mr[2] = exec->mrg(exec, 2);
@@ -1060,194 +639,9 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
1060 exec->mrs (exec, 0, info->mr[0] | 0x00000000); 639 exec->mrs (exec, 0, info->mr[0] | 0x00000000);
1061 exec->wait(exec, tMRD); 640 exec->wait(exec, tMRD);
1062 exec->wait(exec, tDLLK); 641 exec->wait(exec, tDLLK);
1063 if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3) 642 if (pfb->ram.type == NV_MEM_TYPE_GDDR3)
1064 exec->precharge(exec); 643 exec->precharge(exec);
1065 } 644 }
1066 645
1067 return 0; 646 return 0;
1068} 647}
1069
1070int
1071nouveau_mem_vbios_type(struct drm_device *dev)
1072{
1073 struct bit_entry M;
1074 u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
1075 if (!bit_table(dev, 'M', &M) || M.version != 2 || M.length < 5) {
1076 u8 *table = ROMPTR(dev, M.data[3]);
1077 if (table && table[0] == 0x10 && ramcfg < table[3]) {
1078 u8 *entry = table + table[1] + (ramcfg * table[2]);
1079 switch (entry[0] & 0x0f) {
1080 case 0: return NV_MEM_TYPE_DDR2;
1081 case 1: return NV_MEM_TYPE_DDR3;
1082 case 2: return NV_MEM_TYPE_GDDR3;
1083 case 3: return NV_MEM_TYPE_GDDR5;
1084 default:
1085 break;
1086 }
1087
1088 }
1089 }
1090 return NV_MEM_TYPE_UNKNOWN;
1091}
1092
1093static int
1094nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
1095{
1096 /* nothing to do */
1097 return 0;
1098}
1099
1100static int
1101nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
1102{
1103 /* nothing to do */
1104 return 0;
1105}
1106
1107static inline void
1108nouveau_mem_node_cleanup(struct nouveau_mem *node)
1109{
1110 if (node->vma[0].node) {
1111 nouveau_vm_unmap(&node->vma[0]);
1112 nouveau_vm_put(&node->vma[0]);
1113 }
1114
1115 if (node->vma[1].node) {
1116 nouveau_vm_unmap(&node->vma[1]);
1117 nouveau_vm_put(&node->vma[1]);
1118 }
1119}
1120
1121static void
1122nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
1123 struct ttm_mem_reg *mem)
1124{
1125 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
1126 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
1127 struct drm_device *dev = dev_priv->dev;
1128
1129 nouveau_mem_node_cleanup(mem->mm_node);
1130 vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
1131}
1132
1133static int
1134nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
1135 struct ttm_buffer_object *bo,
1136 struct ttm_placement *placement,
1137 struct ttm_mem_reg *mem)
1138{
1139 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
1140 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
1141 struct drm_device *dev = dev_priv->dev;
1142 struct nouveau_bo *nvbo = nouveau_bo(bo);
1143 struct nouveau_mem *node;
1144 u32 size_nc = 0;
1145 int ret;
1146
1147 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
1148 size_nc = 1 << nvbo->page_shift;
1149
1150 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
1151 mem->page_alignment << PAGE_SHIFT, size_nc,
1152 (nvbo->tile_flags >> 8) & 0x3ff, &node);
1153 if (ret) {
1154 mem->mm_node = NULL;
1155 return (ret == -ENOSPC) ? 0 : ret;
1156 }
1157
1158 node->page_shift = nvbo->page_shift;
1159
1160 mem->mm_node = node;
1161 mem->start = node->offset >> PAGE_SHIFT;
1162 return 0;
1163}
1164
1165void
1166nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
1167{
1168 struct nouveau_mm *mm = man->priv;
1169 struct nouveau_mm_node *r;
1170 u32 total = 0, free = 0;
1171
1172 mutex_lock(&mm->mutex);
1173 list_for_each_entry(r, &mm->nodes, nl_entry) {
1174 printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
1175 prefix, r->type, ((u64)r->offset << 12),
1176 (((u64)r->offset + r->length) << 12));
1177
1178 total += r->length;
1179 if (!r->type)
1180 free += r->length;
1181 }
1182 mutex_unlock(&mm->mutex);
1183
1184 printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
1185 prefix, (u64)total << 12, (u64)free << 12);
1186 printk(KERN_DEBUG "%s block: 0x%08x\n",
1187 prefix, mm->block_size << 12);
1188}
1189
1190const struct ttm_mem_type_manager_func nouveau_vram_manager = {
1191 nouveau_vram_manager_init,
1192 nouveau_vram_manager_fini,
1193 nouveau_vram_manager_new,
1194 nouveau_vram_manager_del,
1195 nouveau_vram_manager_debug
1196};
1197
1198static int
1199nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
1200{
1201 return 0;
1202}
1203
1204static int
1205nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
1206{
1207 return 0;
1208}
1209
1210static void
1211nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
1212 struct ttm_mem_reg *mem)
1213{
1214 nouveau_mem_node_cleanup(mem->mm_node);
1215 kfree(mem->mm_node);
1216 mem->mm_node = NULL;
1217}
1218
1219static int
1220nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
1221 struct ttm_buffer_object *bo,
1222 struct ttm_placement *placement,
1223 struct ttm_mem_reg *mem)
1224{
1225 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1226 struct nouveau_mem *node;
1227
1228 if (unlikely((mem->num_pages << PAGE_SHIFT) >=
1229 dev_priv->gart_info.aper_size))
1230 return -ENOMEM;
1231
1232 node = kzalloc(sizeof(*node), GFP_KERNEL);
1233 if (!node)
1234 return -ENOMEM;
1235 node->page_shift = 12;
1236
1237 mem->mm_node = node;
1238 mem->start = 0;
1239 return 0;
1240}
1241
1242void
1243nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
1244{
1245}
1246
1247const struct ttm_mem_type_manager_func nouveau_gart_manager = {
1248 nouveau_gart_manager_init,
1249 nouveau_gart_manager_fini,
1250 nouveau_gart_manager_new,
1251 nouveau_gart_manager_del,
1252 nouveau_gart_manager_debug
1253};
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
deleted file mode 100644
index 57a600c35c95..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_REGION_H__
26#define __NOUVEAU_REGION_H__
27
28struct nouveau_mm_node {
29 struct list_head nl_entry;
30 struct list_head fl_entry;
31 struct list_head rl_entry;
32
33 u8 type;
34 u32 offset;
35 u32 length;
36};
37
38struct nouveau_mm {
39 struct list_head nodes;
40 struct list_head free;
41
42 struct mutex mutex;
43
44 u32 block_size;
45 int heap_nodes;
46};
47
48int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
49int nouveau_mm_fini(struct nouveau_mm *);
50int nouveau_mm_pre(struct nouveau_mm *);
51int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
52 u32 align, struct nouveau_mm_node **);
53void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
54
55int nv50_vram_init(struct drm_device *);
56void nv50_vram_fini(struct drm_device *);
57int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
58 u32 memtype, struct nouveau_mem **);
59void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
60bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
61
62int nvc0_vram_init(struct drm_device *);
63int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
64 u32 memtype, struct nouveau_mem **);
65bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
66
67#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
deleted file mode 100644
index 07d0d1e03690..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_mxm.c
+++ /dev/null
@@ -1,723 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/acpi.h>
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29
30#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
31#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
32
33static u8 *
34mxms_data(struct drm_device *dev)
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 return dev_priv->mxms;
38
39}
40
41static u16
42mxms_version(struct drm_device *dev)
43{
44 u8 *mxms = mxms_data(dev);
45 u16 version = (mxms[4] << 8) | mxms[5];
46 switch (version ) {
47 case 0x0200:
48 case 0x0201:
49 case 0x0300:
50 return version;
51 default:
52 break;
53 }
54
55 MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
56 return 0x0000;
57}
58
59static u16
60mxms_headerlen(struct drm_device *dev)
61{
62 return 8;
63}
64
65static u16
66mxms_structlen(struct drm_device *dev)
67{
68 return *(u16 *)&mxms_data(dev)[6];
69}
70
71static bool
72mxms_checksum(struct drm_device *dev)
73{
74 u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
75 u8 *mxms = mxms_data(dev), sum = 0;
76 while (size--)
77 sum += *mxms++;
78 if (sum) {
79 MXM_DBG(dev, "checksum invalid\n");
80 return false;
81 }
82 return true;
83}
84
85static bool
86mxms_valid(struct drm_device *dev)
87{
88 u8 *mxms = mxms_data(dev);
89 if (*(u32 *)mxms != 0x5f4d584d) {
90 MXM_DBG(dev, "signature invalid\n");
91 return false;
92 }
93
94 if (!mxms_version(dev) || !mxms_checksum(dev))
95 return false;
96
97 return true;
98}
99
100static bool
101mxms_foreach(struct drm_device *dev, u8 types,
102 bool (*exec)(struct drm_device *, u8 *, void *), void *info)
103{
104 u8 *mxms = mxms_data(dev);
105 u8 *desc = mxms + mxms_headerlen(dev);
106 u8 *fini = desc + mxms_structlen(dev) - 1;
107 while (desc < fini) {
108 u8 type = desc[0] & 0x0f;
109 u8 headerlen = 0;
110 u8 recordlen = 0;
111 u8 entries = 0;
112
113 switch (type) {
114 case 0: /* Output Device Structure */
115 if (mxms_version(dev) >= 0x0300)
116 headerlen = 8;
117 else
118 headerlen = 6;
119 break;
120 case 1: /* System Cooling Capability Structure */
121 case 2: /* Thermal Structure */
122 case 3: /* Input Power Structure */
123 headerlen = 4;
124 break;
125 case 4: /* GPIO Device Structure */
126 headerlen = 4;
127 recordlen = 2;
128 entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
129 break;
130 case 5: /* Vendor Specific Structure */
131 headerlen = 8;
132 break;
133 case 6: /* Backlight Control Structure */
134 if (mxms_version(dev) >= 0x0300) {
135 headerlen = 4;
136 recordlen = 8;
137 entries = (desc[1] & 0xf0) >> 4;
138 } else {
139 headerlen = 8;
140 }
141 break;
142 case 7: /* Fan Control Structure */
143 headerlen = 8;
144 recordlen = 4;
145 entries = desc[1] & 0x07;
146 break;
147 default:
148 MXM_DBG(dev, "unknown descriptor type %d\n", type);
149 return false;
150 }
151
152 if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
153 static const char * mxms_desc_name[] = {
154 "ODS", "SCCS", "TS", "IPS",
155 "GSD", "VSS", "BCS", "FCS",
156 };
157 u8 *dump = desc;
158 int i, j;
159
160 MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
161 for (j = headerlen - 1; j >= 0; j--)
162 printk("%02x", dump[j]);
163 printk("\n");
164 dump += headerlen;
165
166 for (i = 0; i < entries; i++, dump += recordlen) {
167 MXM_DBG(dev, " ");
168 for (j = recordlen - 1; j >= 0; j--)
169 printk("%02x", dump[j]);
170 printk("\n");
171 }
172 }
173
174 if (types & (1 << type)) {
175 if (!exec(dev, desc, info))
176 return false;
177 }
178
179 desc += headerlen + (entries * recordlen);
180 }
181
182 return true;
183}
184
185static u8 *
186mxm_table(struct drm_device *dev, u8 *size)
187{
188 struct bit_entry x;
189
190 if (bit_table(dev, 'x', &x)) {
191 MXM_DBG(dev, "BIT 'x' table not present\n");
192 return NULL;
193 }
194
195 if (x.version != 1 || x.length < 3) {
196 MXM_MSG(dev, "BIT x table %d/%d unknown\n",
197 x.version, x.length);
198 return NULL;
199 }
200
201 *size = x.length;
202 return x.data;
203}
204
205/* These map MXM v2.x digital connection values to the appropriate SOR/link,
206 * hopefully they're correct for all boards within the same chipset...
207 *
208 * MXM v3.x VBIOS are nicer and provide pointers to these tables.
209 */
210static u8 nv84_sor_map[16] = {
211 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
213};
214
215static u8 nv92_sor_map[16] = {
216 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
217 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
218};
219
220static u8 nv94_sor_map[16] = {
221 0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
222 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
223};
224
225static u8 nv96_sor_map[16] = {
226 0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
227 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
228};
229
230static u8 nv98_sor_map[16] = {
231 0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
232 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
233};
234
235static u8
236mxm_sor_map(struct drm_device *dev, u8 conn)
237{
238 struct drm_nouveau_private *dev_priv = dev->dev_private;
239 u8 len, *mxm = mxm_table(dev, &len);
240 if (mxm && len >= 6) {
241 u8 *map = ROMPTR(dev, mxm[4]);
242 if (map) {
243 if (map[0] == 0x10) {
244 if (conn < map[3])
245 return map[map[1] + conn];
246 return 0x00;
247 }
248
249 MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
250 }
251 }
252
253 if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
254 return nv84_sor_map[conn];
255 if (dev_priv->chipset == 0x92)
256 return nv92_sor_map[conn];
257 if (dev_priv->chipset == 0x94)
258 return nv94_sor_map[conn];
259 if (dev_priv->chipset == 0x96)
260 return nv96_sor_map[conn];
261 if (dev_priv->chipset == 0x98)
262 return nv98_sor_map[conn];
263
264 MXM_MSG(dev, "missing sor map\n");
265 return 0x00;
266}
267
268static u8
269mxm_ddc_map(struct drm_device *dev, u8 port)
270{
271 u8 len, *mxm = mxm_table(dev, &len);
272 if (mxm && len >= 8) {
273 u8 *map = ROMPTR(dev, mxm[6]);
274 if (map) {
275 if (map[0] == 0x10) {
276 if (port < map[3])
277 return map[map[1] + port];
278 return 0x00;
279 }
280
281 MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
282 }
283 }
284
285 /* v2.x: directly write port as dcb i2cidx */
286 return (port << 4) | port;
287}
288
289struct mxms_odev {
290 u8 outp_type;
291 u8 conn_type;
292 u8 ddc_port;
293 u8 dig_conn;
294};
295
296static void
297mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
298{
299 u64 data = ROM32(pdata[0]);
300 if (mxms_version(dev) >= 0x0300)
301 data |= (u64)ROM16(pdata[4]) << 32;
302
303 desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
304 desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
305 desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
306 desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
307}
308
309struct context {
310 u32 *outp;
311 struct mxms_odev desc;
312};
313
314static bool
315mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
316{
317 struct context *ctx = info;
318 struct mxms_odev desc;
319
320 mxms_output_device(dev, data, &desc);
321 if (desc.outp_type == 2 &&
322 desc.dig_conn == ctx->desc.dig_conn)
323 return false;
324 return true;
325}
326
327static bool
328mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
329{
330 struct context *ctx = info;
331 u64 desc = *(u64 *)data;
332
333 mxms_output_device(dev, data, &ctx->desc);
334
335 /* match dcb encoder type to mxm-ods device type */
336 if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
337 return true;
338
339 /* digital output, have some extra stuff to match here, there's a
340 * table in the vbios that provides a mapping from the mxm digital
341 * connection enum values to SOR/link
342 */
343 if ((desc & 0x00000000000000f0) >= 0x20) {
344 /* check against sor index */
345 u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
346 if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
347 return true;
348
349 /* check dcb entry has a compatible link field */
350 link = (link & 0x30) >> 4;
351 if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
352 return true;
353 }
354
355 /* mark this descriptor accounted for by setting invalid device type,
356 * except of course some manufactures don't follow specs properly and
357 * we need to avoid killing off the TMDS function on DP connectors
358 * if MXM-SIS is missing an entry for it.
359 */
360 data[0] &= ~0xf0;
361 if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
362 mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
363 data[0] |= 0x20; /* modify descriptor to match TMDS now */
364 } else {
365 data[0] |= 0xf0;
366 }
367
368 return false;
369}
370
371static int
372mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
373{
374 struct context ctx = { .outp = (u32 *)dcbe };
375 u8 type, i2cidx, link;
376 u8 *conn;
377
378 /* look for an output device structure that matches this dcb entry.
379 * if one isn't found, disable it.
380 */
381 if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
382 MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
383 idx, ctx.outp[0], ctx.outp[1]);
384 ctx.outp[0] |= 0x0000000f;
385 return 0;
386 }
387
388 /* modify the output's ddc/aux port, there's a pointer to a table
389 * with the mapping from mxm ddc/aux port to dcb i2c_index in the
390 * vbios mxm table
391 */
392 i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
393 if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
394 i2cidx = (i2cidx & 0x0f) << 4;
395 else
396 i2cidx = (i2cidx & 0xf0);
397
398 if (i2cidx != 0xf0) {
399 ctx.outp[0] &= ~0x000000f0;
400 ctx.outp[0] |= i2cidx;
401 }
402
403 /* override dcb sorconf.link, based on what mxm data says */
404 switch (ctx.desc.outp_type) {
405 case 0x00: /* Analog CRT */
406 case 0x01: /* Analog TV/HDTV */
407 break;
408 default:
409 link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
410 ctx.outp[1] &= ~0x00000030;
411 ctx.outp[1] |= link;
412 break;
413 }
414
415 /* we may need to fixup various other vbios tables based on what
416 * the descriptor says the connector type should be.
417 *
418 * in a lot of cases, the vbios tables will claim DVI-I is possible,
419 * and the mxm data says the connector is really HDMI. another
420 * common example is DP->eDP.
421 */
422 conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
423 type = conn[0];
424 switch (ctx.desc.conn_type) {
425 case 0x01: /* LVDS */
426 ctx.outp[1] |= 0x00000004; /* use_power_scripts */
427 /* XXX: modify default link width in LVDS table */
428 break;
429 case 0x02: /* HDMI */
430 type = DCB_CONNECTOR_HDMI_1;
431 break;
432 case 0x03: /* DVI-D */
433 type = DCB_CONNECTOR_DVI_D;
434 break;
435 case 0x0e: /* eDP, falls through to DPint */
436 ctx.outp[1] |= 0x00010000;
437 case 0x07: /* DP internal, wtf is this?? HP8670w */
438 ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
439 type = DCB_CONNECTOR_eDP;
440 break;
441 default:
442 break;
443 }
444
445 if (mxms_version(dev) >= 0x0300)
446 conn[0] = type;
447
448 return 0;
449}
450
451static bool
452mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
453{
454 u64 desc = *(u64 *)data;
455 if ((desc & 0xf0) != 0xf0)
456 MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
457 return true;
458}
459
460static void
461mxm_dcb_sanitise(struct drm_device *dev)
462{
463 u8 *dcb = dcb_table(dev);
464 if (!dcb || dcb[0] != 0x40) {
465 MXM_DBG(dev, "unsupported DCB version\n");
466 return;
467 }
468
469 dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
470 mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
471}
472
473static bool
474mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
475 u8 offset, u8 size, u8 *data)
476{
477 struct i2c_msg msgs[] = {
478 { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
479 { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
480 };
481
482 return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
483}
484
485static bool
486mxm_shadow_rom(struct drm_device *dev, u8 version)
487{
488 struct drm_nouveau_private *dev_priv = dev->dev_private;
489 struct nouveau_i2c_chan *i2c = NULL;
490 u8 i2cidx, mxms[6], addr, size;
491
492 i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
493 if (i2cidx < 0x0f)
494 i2c = nouveau_i2c_find(dev, i2cidx);
495 if (!i2c)
496 return false;
497
498 addr = 0x54;
499 if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
500 addr = 0x56;
501 if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
502 return false;
503 }
504
505 dev_priv->mxms = mxms;
506 size = mxms_headerlen(dev) + mxms_structlen(dev);
507 dev_priv->mxms = kmalloc(size, GFP_KERNEL);
508
509 if (dev_priv->mxms &&
510 mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
511 return true;
512
513 kfree(dev_priv->mxms);
514 dev_priv->mxms = NULL;
515 return false;
516}
517
518#if defined(CONFIG_ACPI)
519static bool
520mxm_shadow_dsm(struct drm_device *dev, u8 version)
521{
522 struct drm_nouveau_private *dev_priv = dev->dev_private;
523 static char muid[] = {
524 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
525 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
526 };
527 u32 mxms_args[] = { 0x00000000 };
528 union acpi_object args[4] = {
529 /* _DSM MUID */
530 { .buffer.type = 3,
531 .buffer.length = sizeof(muid),
532 .buffer.pointer = muid,
533 },
534 /* spec says this can be zero to mean "highest revision", but
535 * of course there's at least one bios out there which fails
536 * unless you pass in exactly the version it supports..
537 */
538 { .integer.type = ACPI_TYPE_INTEGER,
539 .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
540 },
541 /* MXMS function */
542 { .integer.type = ACPI_TYPE_INTEGER,
543 .integer.value = 0x00000010,
544 },
545 /* Pointer to MXMS arguments */
546 { .buffer.type = ACPI_TYPE_BUFFER,
547 .buffer.length = sizeof(mxms_args),
548 .buffer.pointer = (char *)mxms_args,
549 },
550 };
551 struct acpi_object_list list = { ARRAY_SIZE(args), args };
552 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
553 union acpi_object *obj;
554 acpi_handle handle;
555 int ret;
556
557 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
558 if (!handle)
559 return false;
560
561 ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
562 if (ret) {
563 MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
564 return false;
565 }
566
567 obj = retn.pointer;
568 if (obj->type == ACPI_TYPE_BUFFER) {
569 dev_priv->mxms = kmemdup(obj->buffer.pointer,
570 obj->buffer.length, GFP_KERNEL);
571 } else
572 if (obj->type == ACPI_TYPE_INTEGER) {
573 MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
574 }
575
576 kfree(obj);
577 return dev_priv->mxms != NULL;
578}
579#endif
580
581#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
582
583#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
584
585static u8
586wmi_wmmx_mxmi(struct drm_device *dev, u8 version)
587{
588 u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
589 struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
590 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
591 union acpi_object *obj;
592 acpi_status status;
593
594 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
595 if (ACPI_FAILURE(status)) {
596 MXM_DBG(dev, "WMMX MXMI returned %d\n", status);
597 return 0x00;
598 }
599
600 obj = retn.pointer;
601 if (obj->type == ACPI_TYPE_INTEGER) {
602 version = obj->integer.value;
603 MXM_DBG(dev, "WMMX MXMI version %d.%d\n",
604 (version >> 4), version & 0x0f);
605 } else {
606 version = 0;
607 MXM_DBG(dev, "WMMX MXMI returned non-integer\n");
608 }
609
610 kfree(obj);
611 return version;
612}
613
614static bool
615mxm_shadow_wmi(struct drm_device *dev, u8 version)
616{
617 struct drm_nouveau_private *dev_priv = dev->dev_private;
618 u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
619 struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
620 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
621 union acpi_object *obj;
622 acpi_status status;
623
624 if (!wmi_has_guid(WMI_WMMX_GUID)) {
625 MXM_DBG(dev, "WMMX GUID not found\n");
626 return false;
627 }
628
629 mxms_args[1] = wmi_wmmx_mxmi(dev, 0x00);
630 if (!mxms_args[1])
631 mxms_args[1] = wmi_wmmx_mxmi(dev, version);
632 if (!mxms_args[1])
633 return false;
634
635 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
636 if (ACPI_FAILURE(status)) {
637 MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
638 return false;
639 }
640
641 obj = retn.pointer;
642 if (obj->type == ACPI_TYPE_BUFFER) {
643 dev_priv->mxms = kmemdup(obj->buffer.pointer,
644 obj->buffer.length, GFP_KERNEL);
645 }
646
647 kfree(obj);
648 return dev_priv->mxms != NULL;
649}
650#endif
651
652struct mxm_shadow_h {
653 const char *name;
654 bool (*exec)(struct drm_device *, u8 version);
655} _mxm_shadow[] = {
656 { "ROM", mxm_shadow_rom },
657#if defined(CONFIG_ACPI)
658 { "DSM", mxm_shadow_dsm },
659#endif
660#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
661 { "WMI", mxm_shadow_wmi },
662#endif
663 {}
664};
665
666static int
667mxm_shadow(struct drm_device *dev, u8 version)
668{
669 struct drm_nouveau_private *dev_priv = dev->dev_private;
670 struct mxm_shadow_h *shadow = _mxm_shadow;
671 do {
672 MXM_DBG(dev, "checking %s\n", shadow->name);
673 if (shadow->exec(dev, version)) {
674 if (mxms_valid(dev))
675 return 0;
676 kfree(dev_priv->mxms);
677 dev_priv->mxms = NULL;
678 }
679 } while ((++shadow)->name);
680 return -ENOENT;
681}
682
683int
684nouveau_mxm_init(struct drm_device *dev)
685{
686 u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
687 if (!mxm || !mxm[0]) {
688 MXM_MSG(dev, "no VBIOS data, nothing to do\n");
689 return 0;
690 }
691
692 MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
693
694 if (mxm_shadow(dev, mxm[0])) {
695 MXM_MSG(dev, "failed to locate valid SIS\n");
696#if 0
697 /* we should, perhaps, fall back to some kind of limited
698 * mode here if the x86 vbios hasn't already done the
699 * work for us (so we prevent loading with completely
700 * whacked vbios tables).
701 */
702 return -EINVAL;
703#else
704 return 0;
705#endif
706 }
707
708 MXM_MSG(dev, "MXMS Version %d.%d\n",
709 mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
710 mxms_foreach(dev, 0, NULL, NULL);
711
712 if (nouveau_mxmdcb)
713 mxm_dcb_sanitise(dev);
714 return 0;
715}
716
717void
718nouveau_mxm_fini(struct drm_device *dev)
719{
720 struct drm_nouveau_private *dev_priv = dev->dev_private;
721 kfree(dev_priv->mxms);
722 dev_priv->mxms = NULL;
723}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
deleted file mode 100644
index 69c93b864519..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ /dev/null
@@ -1,163 +0,0 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "nouveau_drv.h"
31#include "nouveau_ramht.h"
32
/* Allocate, pin and map the per-channel notifier buffer object, attach
 * it to the channel's VM on NV50+ cards, and initialise the drm_mm heap
 * used to sub-allocate notifier slots out of it.
 *
 * Returns 0 on success; on failure the partially-constructed BO is
 * released via the shared out_err path and the error code is returned.
 */
int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *ntfy = NULL;
	uint32_t flags, ttmpl;
	int ret;

	/* placement (VRAM vs GART) is selectable via a module option */
	if (nouveau_vram_notify) {
		flags = NOUVEAU_GEM_DOMAIN_VRAM;
		ttmpl = TTM_PL_FLAG_VRAM;
	} else {
		flags = NOUVEAU_GEM_DOMAIN_GART;
		ttmpl = TTM_PL_FLAG_TT;
	}

	ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(ntfy, ttmpl);
	if (ret)
		goto out_err;

	ret = nouveau_bo_map(ntfy);
	if (ret)
		goto out_err;

	/* NV50+ channels reach the notifier through a per-channel VMA */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
		if (ret)
			goto out_err;
	}

	/* heap spanning the whole BO, from which slots are carved */
	ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
	if (ret)
		goto out_err;

	chan->notifier_bo = ntfy;
out_err:
	if (ret) {
		/* vma_del is safe even if the vma was never added */
		nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
		drm_gem_object_unreference_unlocked(ntfy->gem);
	}

	return ret;
}
81
/* Tear down the channel's notifier buffer created by
 * nouveau_notifier_init_channel(): drop the VMA, unmap, unpin (under
 * struct_mutex), release the GEM reference and destroy the slot heap.
 * Safe to call when no notifier BO was ever set up.
 */
void
nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	if (!chan->notifier_bo)
		return;

	nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
	nouveau_bo_unmap(chan->notifier_bo);
	/* unpin requires struct_mutex held */
	mutex_lock(&dev->struct_mutex);
	nouveau_bo_unpin(chan->notifier_bo);
	mutex_unlock(&dev->struct_mutex);
	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
	drm_mm_takedown(&chan->notifier_heap);
}
98
/* Destructor hook installed on notifier gpuobjs: returns the drm_mm
 * slot (stashed in gpuobj->priv by nouveau_notifier_alloc) back to the
 * channel's notifier heap when the gpuobj is destroyed.
 */
static void
nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
			     struct nouveau_gpuobj *gpuobj)
{
	NV_DEBUG(dev, "\n");

	if (gpuobj->priv)
		drm_mm_put_block(gpuobj->priv);
}
108
/* Carve a notifier slot of 'size' bytes out of the channel's notifier
 * heap (restricted to [start, end)), wrap it in a DMA context object
 * and publish that object in the channel's RAMHT under 'handle'.
 * On success *b_offset receives the slot's offset within the notifier
 * buffer.
 *
 * Returns 0 on success, -ENOMEM when the heap is exhausted, or the
 * error from ctxdma creation / RAMHT insertion (the slot is returned
 * to the heap on those failures).
 */
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
		       int size, uint32_t start, uint32_t end,
		       uint32_t *b_offset)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *nobj = NULL;
	struct drm_mm_node *mem;
	uint64_t offset;
	int target, ret;

	mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
					  start, end, 0);
	if (mem)
		mem = drm_mm_get_block_range(mem, size, 0, start, end);
	if (!mem) {
		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
		return -ENOMEM;
	}

	/* pre-NV50: address the backing store directly (VRAM or GART);
	 * NV50+: go through the channel's virtual address space
	 */
	if (dev_priv->card_type < NV_50) {
		if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
			target = NV_MEM_TARGET_VRAM;
		else
			target = NV_MEM_TARGET_GART;
		offset = chan->notifier_bo->bo.offset;
	} else {
		target = NV_MEM_TARGET_VM;
		offset = chan->notifier_vma.offset;
	}
	offset += mem->start;

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
				     mem->size, NV_MEM_ACCESS_RW, target,
				     &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
		return ret;
	}
	/* dtor frees 'mem' when the gpuobj is eventually destroyed */
	nobj->dtor = nouveau_notifier_gpuobj_dtor;
	nobj->priv = mem;

	ret = nouveau_ramht_insert(chan, handle, nobj);
	/* drop our local ref; RAMHT holds its own on success */
	nouveau_gpuobj_ref(NULL, &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
		return ret;
	}

	*b_offset = mem->start;
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index ea6acf1c4a78..a11d2e4f8f6e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -24,14 +24,15 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drm.h"
28#include "nouveau_reg.h"
28#include "nouveau_pm.h" 29#include "nouveau_pm.h"
29 30
30static u8 * 31static u8 *
31nouveau_perf_table(struct drm_device *dev, u8 *ver) 32nouveau_perf_table(struct drm_device *dev, u8 *ver)
32{ 33{
33 struct drm_nouveau_private *dev_priv = dev->dev_private; 34 struct nouveau_drm *drm = nouveau_drm(dev);
34 struct nvbios *bios = &dev_priv->vbios; 35 struct nvbios *bios = &drm->vbios;
35 struct bit_entry P; 36 struct bit_entry P;
36 37
37 if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) { 38 if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
@@ -87,7 +88,7 @@ u8 *
87nouveau_perf_rammap(struct drm_device *dev, u32 freq, 88nouveau_perf_rammap(struct drm_device *dev, u32 freq,
88 u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 89 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
89{ 90{
90 struct drm_nouveau_private *dev_priv = dev->dev_private; 91 struct nouveau_drm *drm = nouveau_drm(dev);
91 struct bit_entry P; 92 struct bit_entry P;
92 u8 *perf, i = 0; 93 u8 *perf, i = 0;
93 94
@@ -114,8 +115,8 @@ nouveau_perf_rammap(struct drm_device *dev, u32 freq,
114 return NULL; 115 return NULL;
115 } 116 }
116 117
117 if (dev_priv->chipset == 0x49 || 118 if (nv_device(drm->device)->chipset == 0x49 ||
118 dev_priv->chipset == 0x4b) 119 nv_device(drm->device)->chipset == 0x4b)
119 freq /= 2; 120 freq /= 2;
120 121
121 while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) { 122 while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
@@ -142,12 +143,13 @@ nouveau_perf_rammap(struct drm_device *dev, u32 freq,
142u8 * 143u8 *
143nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len) 144nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
144{ 145{
145 struct drm_nouveau_private *dev_priv = dev->dev_private; 146 struct nouveau_device *device = nouveau_dev(dev);
146 struct nvbios *bios = &dev_priv->vbios; 147 struct nouveau_drm *drm = nouveau_drm(dev);
148 struct nvbios *bios = &drm->vbios;
147 u8 strap, hdr, cnt; 149 u8 strap, hdr, cnt;
148 u8 *rammap; 150 u8 *rammap;
149 151
150 strap = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2; 152 strap = (nv_rd32(device, 0x101000) & 0x0000003c) >> 2;
151 if (bios->ram_restrict_tbl_ptr) 153 if (bios->ram_restrict_tbl_ptr)
152 strap = bios->data[bios->ram_restrict_tbl_ptr + strap]; 154 strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
153 155
@@ -161,8 +163,8 @@ nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
161u8 * 163u8 *
162nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len) 164nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
163{ 165{
164 struct drm_nouveau_private *dev_priv = dev->dev_private; 166 struct nouveau_drm *drm = nouveau_drm(dev);
165 struct nvbios *bios = &dev_priv->vbios; 167 struct nvbios *bios = &drm->vbios;
166 struct bit_entry P; 168 struct bit_entry P;
167 u8 *perf, *timing = NULL; 169 u8 *perf, *timing = NULL;
168 u8 i = 0, hdr, cnt; 170 u8 i = 0, hdr, cnt;
@@ -202,20 +204,21 @@ nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
202static void 204static void
203legacy_perf_init(struct drm_device *dev) 205legacy_perf_init(struct drm_device *dev)
204{ 206{
205 struct drm_nouveau_private *dev_priv = dev->dev_private; 207 struct nouveau_device *device = nouveau_dev(dev);
206 struct nvbios *bios = &dev_priv->vbios; 208 struct nouveau_drm *drm = nouveau_drm(dev);
207 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 209 struct nvbios *bios = &drm->vbios;
210 struct nouveau_pm *pm = nouveau_pm(dev);
208 char *perf, *entry, *bmp = &bios->data[bios->offset]; 211 char *perf, *entry, *bmp = &bios->data[bios->offset];
209 int headerlen, use_straps; 212 int headerlen, use_straps;
210 213
211 if (bmp[5] < 0x5 || bmp[6] < 0x14) { 214 if (bmp[5] < 0x5 || bmp[6] < 0x14) {
212 NV_DEBUG(dev, "BMP version too old for perf\n"); 215 NV_DEBUG(drm, "BMP version too old for perf\n");
213 return; 216 return;
214 } 217 }
215 218
216 perf = ROMPTR(dev, bmp[0x73]); 219 perf = ROMPTR(dev, bmp[0x73]);
217 if (!perf) { 220 if (!perf) {
218 NV_DEBUG(dev, "No memclock table pointer found.\n"); 221 NV_DEBUG(drm, "No memclock table pointer found.\n");
219 return; 222 return;
220 } 223 }
221 224
@@ -231,13 +234,13 @@ legacy_perf_init(struct drm_device *dev)
231 headerlen = (use_straps ? 8 : 2); 234 headerlen = (use_straps ? 8 : 2);
232 break; 235 break;
233 default: 236 default:
234 NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]); 237 NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]);
235 return; 238 return;
236 } 239 }
237 240
238 entry = perf + headerlen; 241 entry = perf + headerlen;
239 if (use_straps) 242 if (use_straps)
240 entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1; 243 entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
241 244
242 sprintf(pm->perflvl[0].name, "performance_level_0"); 245 sprintf(pm->perflvl[0].name, "performance_level_0");
243 pm->perflvl[0].memory = ROM16(entry[0]) * 20; 246 pm->perflvl[0].memory = ROM16(entry[0]) * 20;
@@ -247,7 +250,7 @@ legacy_perf_init(struct drm_device *dev)
247static void 250static void
248nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl) 251nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
249{ 252{
250 struct drm_nouveau_private *dev_priv = dev->dev_private; 253 struct nouveau_drm *drm = nouveau_drm(dev);
251 struct bit_entry P; 254 struct bit_entry P;
252 u8 *vmap; 255 u8 *vmap;
253 int id; 256 int id;
@@ -258,7 +261,7 @@ nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
258 /* boards using voltage table version <0x40 store the voltage 261 /* boards using voltage table version <0x40 store the voltage
259 * level directly in the perflvl entry as a multiple of 10mV 262 * level directly in the perflvl entry as a multiple of 10mV
260 */ 263 */
261 if (dev_priv->engine.pm.voltage.version < 0x40) { 264 if (drm->pm->voltage.version < 0x40) {
262 perflvl->volt_min = id * 10000; 265 perflvl->volt_min = id * 10000;
263 perflvl->volt_max = perflvl->volt_min; 266 perflvl->volt_max = perflvl->volt_min;
264 return; 267 return;
@@ -268,14 +271,14 @@ nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
268 * vbios table containing a min/max voltage value for the perflvl 271 * vbios table containing a min/max voltage value for the perflvl
269 */ 272 */
270 if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) { 273 if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
271 NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n", 274 NV_DEBUG(drm, "where's our volt map table ptr? %d %d\n",
272 P.version, P.length); 275 P.version, P.length);
273 return; 276 return;
274 } 277 }
275 278
276 vmap = ROMPTR(dev, P.data[32]); 279 vmap = ROMPTR(dev, P.data[32]);
277 if (!vmap) { 280 if (!vmap) {
278 NV_DEBUG(dev, "volt map table pointer invalid\n"); 281 NV_DEBUG(drm, "volt map table pointer invalid\n");
279 return; 282 return;
280 } 283 }
281 284
@@ -289,9 +292,9 @@ nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
289void 292void
290nouveau_perf_init(struct drm_device *dev) 293nouveau_perf_init(struct drm_device *dev)
291{ 294{
292 struct drm_nouveau_private *dev_priv = dev->dev_private; 295 struct nouveau_drm *drm = nouveau_drm(dev);
293 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 296 struct nouveau_pm *pm = nouveau_pm(dev);
294 struct nvbios *bios = &dev_priv->vbios; 297 struct nvbios *bios = &drm->vbios;
295 u8 *perf, ver, hdr, cnt, len; 298 u8 *perf, ver, hdr, cnt, len;
296 int ret, vid, i = -1; 299 int ret, vid, i = -1;
297 300
@@ -301,8 +304,6 @@ nouveau_perf_init(struct drm_device *dev)
301 } 304 }
302 305
303 perf = nouveau_perf_table(dev, &ver); 306 perf = nouveau_perf_table(dev, &ver);
304 if (ver >= 0x20 && ver < 0x40)
305 pm->fan.pwm_divisor = ROM16(perf[6]);
306 307
307 while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) { 308 while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
308 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; 309 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
@@ -328,8 +329,8 @@ nouveau_perf_init(struct drm_device *dev)
328 perflvl->shader = ROM16(perf[6]) * 1000; 329 perflvl->shader = ROM16(perf[6]) * 1000;
329 perflvl->core = perflvl->shader; 330 perflvl->core = perflvl->shader;
330 perflvl->core += (signed char)perf[8] * 1000; 331 perflvl->core += (signed char)perf[8] * 1000;
331 if (dev_priv->chipset == 0x49 || 332 if (nv_device(drm->device)->chipset == 0x49 ||
332 dev_priv->chipset == 0x4b) 333 nv_device(drm->device)->chipset == 0x4b)
333 perflvl->memory = ROM16(perf[11]) * 1000; 334 perflvl->memory = ROM16(perf[11]) * 1000;
334 else 335 else
335 perflvl->memory = ROM16(perf[11]) * 2000; 336 perflvl->memory = ROM16(perf[11]) * 2000;
@@ -356,7 +357,7 @@ nouveau_perf_init(struct drm_device *dev)
356#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000) 357#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
357 perflvl->fanspeed = 0; /*XXX*/ 358 perflvl->fanspeed = 0; /*XXX*/
358 perflvl->volt_min = perf[2]; 359 perflvl->volt_min = perf[2];
359 if (dev_priv->card_type == NV_50) { 360 if (nv_device(drm->device)->card_type == NV_50) {
360 perflvl->core = subent(0); 361 perflvl->core = subent(0);
361 perflvl->shader = subent(1); 362 perflvl->shader = subent(1);
362 perflvl->memory = subent(2); 363 perflvl->memory = subent(2);
@@ -382,7 +383,7 @@ nouveau_perf_init(struct drm_device *dev)
382 if (pm->voltage.supported && perflvl->volt_min) { 383 if (pm->voltage.supported && perflvl->volt_min) {
383 vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min); 384 vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
384 if (vid < 0) { 385 if (vid < 0) {
385 NV_DEBUG(dev, "perflvl %d, bad vid\n", i); 386 NV_DEBUG(drm, "perflvl %d, bad vid\n", i);
386 continue; 387 continue;
387 } 388 }
388 } 389 }
@@ -391,7 +392,7 @@ nouveau_perf_init(struct drm_device *dev)
391 ret = nouveau_mem_timing_calc(dev, perflvl->memory, 392 ret = nouveau_mem_timing_calc(dev, perflvl->memory,
392 &perflvl->timing); 393 &perflvl->timing);
393 if (ret) { 394 if (ret) {
394 NV_DEBUG(dev, "perflvl %d, bad timing: %d\n", i, ret); 395 NV_DEBUG(drm, "perflvl %d, bad timing: %d\n", i, ret);
395 continue; 396 continue;
396 } 397 }
397 398
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index da3e7c3abab7..b9d5335df742 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -22,12 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_pm.h"
29#include "nouveau_gpio.h"
30
31#ifdef CONFIG_ACPI 25#ifdef CONFIG_ACPI
32#include <linux/acpi.h> 26#include <linux/acpi.h>
33#endif 27#endif
@@ -35,85 +29,41 @@
35#include <linux/hwmon.h> 29#include <linux/hwmon.h>
36#include <linux/hwmon-sysfs.h> 30#include <linux/hwmon-sysfs.h>
37 31
38static int 32#include "drmP.h"
39nouveau_pwmfan_get(struct drm_device *dev)
40{
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
43 struct gpio_func gpio;
44 u32 divs, duty;
45 int ret;
46 33
47 if (!pm->pwm_get) 34#include "nouveau_drm.h"
48 return -ENODEV; 35#include "nouveau_pm.h"
49 36
50 ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio); 37#include <subdev/gpio.h>
51 if (ret == 0) { 38#include <subdev/timer.h>
52 ret = pm->pwm_get(dev, gpio.line, &divs, &duty); 39#include <subdev/therm.h>
53 if (ret == 0 && divs) {
54 divs = max(divs, duty);
55 if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
56 duty = divs - duty;
57 return (duty * 100) / divs;
58 }
59 40
60 return nouveau_gpio_func_get(dev, gpio.func) * 100; 41MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
61 } 42static char *nouveau_perflvl;
43module_param_named(perflvl, nouveau_perflvl, charp, 0400);
62 44
63 return -ENODEV; 45MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
64} 46static int nouveau_perflvl_wr;
65 47module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
66static int
67nouveau_pwmfan_set(struct drm_device *dev, int percent)
68{
69 struct drm_nouveau_private *dev_priv = dev->dev_private;
70 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
71 struct gpio_func gpio;
72 u32 divs, duty;
73 int ret;
74
75 if (!pm->pwm_set)
76 return -ENODEV;
77
78 ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
79 if (ret == 0) {
80 divs = pm->fan.pwm_divisor;
81 if (pm->fan.pwm_freq) {
82 /*XXX: PNVIO clock more than likely... */
83 divs = 135000 / pm->fan.pwm_freq;
84 if (dev_priv->chipset < 0xa3)
85 divs /= 4;
86 }
87
88 duty = ((divs * percent) + 99) / 100;
89 if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
90 duty = divs - duty;
91
92 ret = pm->pwm_set(dev, gpio.line, divs, duty);
93 if (!ret)
94 pm->fan.percent = percent;
95 return ret;
96 }
97
98 return -ENODEV;
99}
100 48
101static int 49static int
102nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl, 50nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
103 struct nouveau_pm_level *a, struct nouveau_pm_level *b) 51 struct nouveau_pm_level *a, struct nouveau_pm_level *b)
104{ 52{
105 struct drm_nouveau_private *dev_priv = dev->dev_private; 53 struct nouveau_drm *drm = nouveau_drm(dev);
106 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 54 struct nouveau_pm *pm = nouveau_pm(dev);
55 struct nouveau_therm *therm = nouveau_therm(drm);
107 int ret; 56 int ret;
108 57
109 /*XXX: not on all boards, we should control based on temperature 58 /*XXX: not on all boards, we should control based on temperature
110 * on recent boards.. or maybe on some other factor we don't 59 * on recent boards.. or maybe on some other factor we don't
111 * know about? 60 * know about?
112 */ 61 */
113 if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) { 62 if (therm && therm->fan_set &&
114 ret = nouveau_pwmfan_set(dev, perflvl->fanspeed); 63 a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
64 ret = therm->fan_set(therm, perflvl->fanspeed);
115 if (ret && ret != -ENODEV) { 65 if (ret && ret != -ENODEV) {
116 NV_ERROR(dev, "fanspeed set failed: %d\n", ret); 66 NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
117 return ret; 67 return ret;
118 } 68 }
119 } 69 }
@@ -122,7 +72,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
122 if (perflvl->volt_min && b->volt_min > a->volt_min) { 72 if (perflvl->volt_min && b->volt_min > a->volt_min) {
123 ret = pm->voltage_set(dev, perflvl->volt_min); 73 ret = pm->voltage_set(dev, perflvl->volt_min);
124 if (ret) { 74 if (ret) {
125 NV_ERROR(dev, "voltage set failed: %d\n", ret); 75 NV_ERROR(drm, "voltage set failed: %d\n", ret);
126 return ret; 76 return ret;
127 } 77 }
128 } 78 }
@@ -134,8 +84,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
134static int 84static int
135nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) 85nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
136{ 86{
137 struct drm_nouveau_private *dev_priv = dev->dev_private; 87 struct nouveau_pm *pm = nouveau_pm(dev);
138 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
139 void *state; 88 void *state;
140 int ret; 89 int ret;
141 90
@@ -171,8 +120,9 @@ error:
171void 120void
172nouveau_pm_trigger(struct drm_device *dev) 121nouveau_pm_trigger(struct drm_device *dev)
173{ 122{
174 struct drm_nouveau_private *dev_priv = dev->dev_private; 123 struct nouveau_drm *drm = nouveau_drm(dev);
175 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 124 struct nouveau_timer *ptimer = nouveau_timer(drm->device);
125 struct nouveau_pm *pm = nouveau_pm(dev);
176 struct nouveau_pm_profile *profile = NULL; 126 struct nouveau_pm_profile *profile = NULL;
177 struct nouveau_pm_level *perflvl = NULL; 127 struct nouveau_pm_level *perflvl = NULL;
178 int ret; 128 int ret;
@@ -194,24 +144,22 @@ nouveau_pm_trigger(struct drm_device *dev)
194 144
195 /* change perflvl, if necessary */ 145 /* change perflvl, if necessary */
196 if (perflvl != pm->cur) { 146 if (perflvl != pm->cur) {
197 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 147 u64 time0 = ptimer->read(ptimer);
198 u64 time0 = ptimer->read(dev);
199 148
200 NV_INFO(dev, "setting performance level: %d", perflvl->id); 149 NV_INFO(drm, "setting performance level: %d", perflvl->id);
201 ret = nouveau_pm_perflvl_set(dev, perflvl); 150 ret = nouveau_pm_perflvl_set(dev, perflvl);
202 if (ret) 151 if (ret)
203 NV_INFO(dev, "> reclocking failed: %d\n\n", ret); 152 NV_INFO(drm, "> reclocking failed: %d\n\n", ret);
204 153
205 NV_INFO(dev, "> reclocking took %lluns\n\n", 154 NV_INFO(drm, "> reclocking took %lluns\n\n",
206 ptimer->read(dev) - time0); 155 ptimer->read(ptimer) - time0);
207 } 156 }
208} 157}
209 158
210static struct nouveau_pm_profile * 159static struct nouveau_pm_profile *
211profile_find(struct drm_device *dev, const char *string) 160profile_find(struct drm_device *dev, const char *string)
212{ 161{
213 struct drm_nouveau_private *dev_priv = dev->dev_private; 162 struct nouveau_pm *pm = nouveau_pm(dev);
214 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
215 struct nouveau_pm_profile *profile; 163 struct nouveau_pm_profile *profile;
216 164
217 list_for_each_entry(profile, &pm->profiles, head) { 165 list_for_each_entry(profile, &pm->profiles, head) {
@@ -225,8 +173,7 @@ profile_find(struct drm_device *dev, const char *string)
225static int 173static int
226nouveau_pm_profile_set(struct drm_device *dev, const char *profile) 174nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
227{ 175{
228 struct drm_nouveau_private *dev_priv = dev->dev_private; 176 struct nouveau_pm *pm = nouveau_pm(dev);
229 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
230 struct nouveau_pm_profile *ac = NULL, *dc = NULL; 177 struct nouveau_pm_profile *ac = NULL, *dc = NULL;
231 char string[16], *cur = string, *ptr; 178 char string[16], *cur = string, *ptr;
232 179
@@ -279,8 +226,9 @@ const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
279static int 226static int
280nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) 227nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
281{ 228{
282 struct drm_nouveau_private *dev_priv = dev->dev_private; 229 struct nouveau_drm *drm = nouveau_drm(dev);
283 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 230 struct nouveau_pm *pm = nouveau_pm(dev);
231 struct nouveau_therm *therm = nouveau_therm(drm->device);
284 int ret; 232 int ret;
285 233
286 memset(perflvl, 0, sizeof(*perflvl)); 234 memset(perflvl, 0, sizeof(*perflvl));
@@ -299,9 +247,11 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
299 } 247 }
300 } 248 }
301 249
302 ret = nouveau_pwmfan_get(dev); 250 if (therm && therm->fan_get) {
303 if (ret > 0) 251 ret = therm->fan_get(therm);
304 perflvl->fanspeed = ret; 252 if (ret >= 0)
253 perflvl->fanspeed = ret;
254 }
305 255
306 nouveau_mem_timing_read(dev, &perflvl->timing); 256 nouveau_mem_timing_read(dev, &perflvl->timing);
307 return 0; 257 return 0;
@@ -362,8 +312,7 @@ static ssize_t
362nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf) 312nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
363{ 313{
364 struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); 314 struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
365 struct drm_nouveau_private *dev_priv = dev->dev_private; 315 struct nouveau_pm *pm = nouveau_pm(dev);
366 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
367 struct nouveau_pm_level cur; 316 struct nouveau_pm_level cur;
368 int len = PAGE_SIZE, ret; 317 int len = PAGE_SIZE, ret;
369 char *ptr = buf; 318 char *ptr = buf;
@@ -398,8 +347,8 @@ static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
398static int 347static int
399nouveau_sysfs_init(struct drm_device *dev) 348nouveau_sysfs_init(struct drm_device *dev)
400{ 349{
401 struct drm_nouveau_private *dev_priv = dev->dev_private; 350 struct nouveau_drm *drm = nouveau_drm(dev);
402 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 351 struct nouveau_pm *pm = nouveau_pm(dev);
403 struct device *d = &dev->pdev->dev; 352 struct device *d = &dev->pdev->dev;
404 int ret, i; 353 int ret, i;
405 354
@@ -418,7 +367,7 @@ nouveau_sysfs_init(struct drm_device *dev)
418 367
419 ret = device_create_file(d, &perflvl->dev_attr); 368 ret = device_create_file(d, &perflvl->dev_attr);
420 if (ret) { 369 if (ret) {
421 NV_ERROR(dev, "failed pervlvl %d sysfs: %d\n", 370 NV_ERROR(drm, "failed pervlvl %d sysfs: %d\n",
422 perflvl->id, i); 371 perflvl->id, i);
423 perflvl->dev_attr.attr.name = NULL; 372 perflvl->dev_attr.attr.name = NULL;
424 nouveau_pm_fini(dev); 373 nouveau_pm_fini(dev);
@@ -432,8 +381,7 @@ nouveau_sysfs_init(struct drm_device *dev)
432static void 381static void
433nouveau_sysfs_fini(struct drm_device *dev) 382nouveau_sysfs_fini(struct drm_device *dev)
434{ 383{
435 struct drm_nouveau_private *dev_priv = dev->dev_private; 384 struct nouveau_pm *pm = nouveau_pm(dev);
436 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
437 struct device *d = &dev->pdev->dev; 385 struct device *d = &dev->pdev->dev;
438 int i; 386 int i;
439 387
@@ -453,10 +401,10 @@ static ssize_t
453nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) 401nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
454{ 402{
455 struct drm_device *dev = dev_get_drvdata(d); 403 struct drm_device *dev = dev_get_drvdata(d);
456 struct drm_nouveau_private *dev_priv = dev->dev_private; 404 struct nouveau_drm *drm = nouveau_drm(dev);
457 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 405 struct nouveau_therm *therm = nouveau_therm(drm->device);
458 406
459 return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000); 407 return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000);
460} 408}
461static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, 409static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
462 NULL, 0); 410 NULL, 0);
@@ -465,28 +413,25 @@ static ssize_t
465nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) 413nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
466{ 414{
467 struct drm_device *dev = dev_get_drvdata(d); 415 struct drm_device *dev = dev_get_drvdata(d);
468 struct drm_nouveau_private *dev_priv = dev->dev_private; 416 struct nouveau_drm *drm = nouveau_drm(dev);
469 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 417 struct nouveau_therm *therm = nouveau_therm(drm->device);
470 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
471 418
472 return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000); 419 return snprintf(buf, PAGE_SIZE, "%d\n",
420 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
473} 421}
474static ssize_t 422static ssize_t
475nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, 423nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
476 const char *buf, size_t count) 424 const char *buf, size_t count)
477{ 425{
478 struct drm_device *dev = dev_get_drvdata(d); 426 struct drm_device *dev = dev_get_drvdata(d);
479 struct drm_nouveau_private *dev_priv = dev->dev_private; 427 struct nouveau_drm *drm = nouveau_drm(dev);
480 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 428 struct nouveau_therm *therm = nouveau_therm(drm->device);
481 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
482 long value; 429 long value;
483 430
484 if (kstrtol(buf, 10, &value) == -EINVAL) 431 if (kstrtol(buf, 10, &value) == -EINVAL)
485 return count; 432 return count;
486 433
487 temp->down_clock = value/1000; 434 therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK, value / 1000);
488
489 nouveau_temp_safety_checks(dev);
490 435
491 return count; 436 return count;
492} 437}
@@ -499,11 +444,11 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
499 char *buf) 444 char *buf)
500{ 445{
501 struct drm_device *dev = dev_get_drvdata(d); 446 struct drm_device *dev = dev_get_drvdata(d);
502 struct drm_nouveau_private *dev_priv = dev->dev_private; 447 struct nouveau_drm *drm = nouveau_drm(dev);
503 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 448 struct nouveau_therm *therm = nouveau_therm(drm->device);
504 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
505 449
506 return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000); 450 return snprintf(buf, PAGE_SIZE, "%d\n",
451 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
507} 452}
508static ssize_t 453static ssize_t
509nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, 454nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
@@ -511,17 +456,14 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
511 size_t count) 456 size_t count)
512{ 457{
513 struct drm_device *dev = dev_get_drvdata(d); 458 struct drm_device *dev = dev_get_drvdata(d);
514 struct drm_nouveau_private *dev_priv = dev->dev_private; 459 struct nouveau_drm *drm = nouveau_drm(dev);
515 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 460 struct nouveau_therm *therm = nouveau_therm(drm->device);
516 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
517 long value; 461 long value;
518 462
519 if (kstrtol(buf, 10, &value) == -EINVAL) 463 if (kstrtol(buf, 10, &value) == -EINVAL)
520 return count; 464 return count;
521 465
522 temp->critical = value/1000; 466 therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL, value / 1000);
523
524 nouveau_temp_safety_checks(dev);
525 467
526 return count; 468 return count;
527} 469}
@@ -553,47 +495,62 @@ nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
553 char *buf) 495 char *buf)
554{ 496{
555 struct drm_device *dev = dev_get_drvdata(d); 497 struct drm_device *dev = dev_get_drvdata(d);
556 struct drm_nouveau_private *dev_priv = dev->dev_private; 498 struct nouveau_drm *drm = nouveau_drm(dev);
557 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 499 struct nouveau_therm *therm = nouveau_therm(drm->device);
558 struct gpio_func gpio; 500
559 u32 cycles, cur, prev; 501 return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
560 u64 start; 502}
503static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
504 NULL, 0);
505
506 static ssize_t
507nouveau_hwmon_get_pwm1_enable(struct device *d,
508 struct device_attribute *a, char *buf)
509{
510 struct drm_device *dev = dev_get_drvdata(d);
511 struct nouveau_drm *drm = nouveau_drm(dev);
512 struct nouveau_therm *therm = nouveau_therm(drm->device);
561 int ret; 513 int ret;
562 514
563 ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio); 515 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
564 if (ret) 516 if (ret < 0)
565 return ret; 517 return ret;
566 518
567 /* Monitor the GPIO input 0x3b for 250ms. 519 return sprintf(buf, "%i\n", ret);
568 * When the fan spins, it changes the value of GPIO FAN_SENSE. 520}
569 * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
570 */
571 start = ptimer->read(dev);
572 prev = nouveau_gpio_sense(dev, 0, gpio.line);
573 cycles = 0;
574 do {
575 cur = nouveau_gpio_sense(dev, 0, gpio.line);
576 if (prev != cur) {
577 cycles++;
578 prev = cur;
579 }
580 521
581 usleep_range(500, 1000); /* supports 0 < rpm < 7500 */ 522static ssize_t
582 } while (ptimer->read(dev) - start < 250000000); 523nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
524 const char *buf, size_t count)
525{
526 struct drm_device *dev = dev_get_drvdata(d);
527 struct nouveau_drm *drm = nouveau_drm(dev);
528 struct nouveau_therm *therm = nouveau_therm(drm->device);
529 long value;
530 int ret;
531
532 if (strict_strtol(buf, 10, &value) == -EINVAL)
533 return -EINVAL;
583 534
584 /* interpolate to get rpm */ 535 ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
585 return sprintf(buf, "%i\n", cycles / 4 * 4 * 60); 536 if (ret)
537 return ret;
538 else
539 return count;
586} 540}
587static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input, 541static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
588 NULL, 0); 542 nouveau_hwmon_get_pwm1_enable,
543 nouveau_hwmon_set_pwm1_enable, 0);
589 544
590static ssize_t 545static ssize_t
591nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf) 546nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
592{ 547{
593 struct drm_device *dev = dev_get_drvdata(d); 548 struct drm_device *dev = dev_get_drvdata(d);
549 struct nouveau_drm *drm = nouveau_drm(dev);
550 struct nouveau_therm *therm = nouveau_therm(drm->device);
594 int ret; 551 int ret;
595 552
596 ret = nouveau_pwmfan_get(dev); 553 ret = therm->fan_get(therm);
597 if (ret < 0) 554 if (ret < 0)
598 return ret; 555 return ret;
599 556
@@ -601,12 +558,12 @@ nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
601} 558}
602 559
603static ssize_t 560static ssize_t
604nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a, 561nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
605 const char *buf, size_t count) 562 const char *buf, size_t count)
606{ 563{
607 struct drm_device *dev = dev_get_drvdata(d); 564 struct drm_device *dev = dev_get_drvdata(d);
608 struct drm_nouveau_private *dev_priv = dev->dev_private; 565 struct nouveau_drm *drm = nouveau_drm(dev);
609 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 566 struct nouveau_therm *therm = nouveau_therm(drm->device);
610 int ret = -ENODEV; 567 int ret = -ENODEV;
611 long value; 568 long value;
612 569
@@ -616,103 +573,96 @@ nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
616 if (kstrtol(buf, 10, &value) == -EINVAL) 573 if (kstrtol(buf, 10, &value) == -EINVAL)
617 return -EINVAL; 574 return -EINVAL;
618 575
619 if (value < pm->fan.min_duty) 576 ret = therm->fan_set(therm, value);
620 value = pm->fan.min_duty;
621 if (value > pm->fan.max_duty)
622 value = pm->fan.max_duty;
623
624 ret = nouveau_pwmfan_set(dev, value);
625 if (ret) 577 if (ret)
626 return ret; 578 return ret;
627 579
628 return count; 580 return count;
629} 581}
630 582
631static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR, 583static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
632 nouveau_hwmon_get_pwm0, 584 nouveau_hwmon_get_pwm1,
633 nouveau_hwmon_set_pwm0, 0); 585 nouveau_hwmon_set_pwm1, 0);
634 586
635static ssize_t 587static ssize_t
636nouveau_hwmon_get_pwm0_min(struct device *d, 588nouveau_hwmon_get_pwm1_min(struct device *d,
637 struct device_attribute *a, char *buf) 589 struct device_attribute *a, char *buf)
638{ 590{
639 struct drm_device *dev = dev_get_drvdata(d); 591 struct drm_device *dev = dev_get_drvdata(d);
640 struct drm_nouveau_private *dev_priv = dev->dev_private; 592 struct nouveau_drm *drm = nouveau_drm(dev);
641 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 593 struct nouveau_therm *therm = nouveau_therm(drm->device);
594 int ret;
595
596 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
597 if (ret < 0)
598 return ret;
642 599
643 return sprintf(buf, "%i\n", pm->fan.min_duty); 600 return sprintf(buf, "%i\n", ret);
644} 601}
645 602
646static ssize_t 603static ssize_t
647nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a, 604nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
648 const char *buf, size_t count) 605 const char *buf, size_t count)
649{ 606{
650 struct drm_device *dev = dev_get_drvdata(d); 607 struct drm_device *dev = dev_get_drvdata(d);
651 struct drm_nouveau_private *dev_priv = dev->dev_private; 608 struct nouveau_drm *drm = nouveau_drm(dev);
652 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 609 struct nouveau_therm *therm = nouveau_therm(drm->device);
653 long value; 610 long value;
611 int ret;
654 612
655 if (kstrtol(buf, 10, &value) == -EINVAL) 613 if (kstrtol(buf, 10, &value) == -EINVAL)
656 return -EINVAL; 614 return -EINVAL;
657 615
658 if (value < 0) 616 ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY, value);
659 value = 0; 617 if (ret < 0)
660 618 return ret;
661 if (pm->fan.max_duty - value < 10)
662 value = pm->fan.max_duty - 10;
663
664 if (value < 10)
665 pm->fan.min_duty = 10;
666 else
667 pm->fan.min_duty = value;
668 619
669 return count; 620 return count;
670} 621}
671 622
672static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR, 623static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO | S_IWUSR,
673 nouveau_hwmon_get_pwm0_min, 624 nouveau_hwmon_get_pwm1_min,
674 nouveau_hwmon_set_pwm0_min, 0); 625 nouveau_hwmon_set_pwm1_min, 0);
675 626
676static ssize_t 627static ssize_t
677nouveau_hwmon_get_pwm0_max(struct device *d, 628nouveau_hwmon_get_pwm1_max(struct device *d,
678 struct device_attribute *a, char *buf) 629 struct device_attribute *a, char *buf)
679{ 630{
680 struct drm_device *dev = dev_get_drvdata(d); 631 struct drm_device *dev = dev_get_drvdata(d);
681 struct drm_nouveau_private *dev_priv = dev->dev_private; 632 struct nouveau_drm *drm = nouveau_drm(dev);
682 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 633 struct nouveau_therm *therm = nouveau_therm(drm->device);
634 int ret;
635
636 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
637 if (ret < 0)
638 return ret;
683 639
684 return sprintf(buf, "%i\n", pm->fan.max_duty); 640 return sprintf(buf, "%i\n", ret);
685} 641}
686 642
687static ssize_t 643static ssize_t
688nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a, 644nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
689 const char *buf, size_t count) 645 const char *buf, size_t count)
690{ 646{
691 struct drm_device *dev = dev_get_drvdata(d); 647 struct drm_device *dev = dev_get_drvdata(d);
692 struct drm_nouveau_private *dev_priv = dev->dev_private; 648 struct nouveau_drm *drm = nouveau_drm(dev);
693 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 649 struct nouveau_therm *therm = nouveau_therm(drm->device);
694 long value; 650 long value;
651 int ret;
695 652
696 if (kstrtol(buf, 10, &value) == -EINVAL) 653 if (kstrtol(buf, 10, &value) == -EINVAL)
697 return -EINVAL; 654 return -EINVAL;
698 655
699 if (value < 0) 656 ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY, value);
700 value = 0; 657 if (ret < 0)
701 658 return ret;
702 if (value - pm->fan.min_duty < 10)
703 value = pm->fan.min_duty + 10;
704
705 if (value > 100)
706 pm->fan.max_duty = 100;
707 else
708 pm->fan.max_duty = value;
709 659
710 return count; 660 return count;
711} 661}
712 662
713static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR, 663static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
714 nouveau_hwmon_get_pwm0_max, 664 nouveau_hwmon_get_pwm1_max,
715 nouveau_hwmon_set_pwm0_max, 0); 665 nouveau_hwmon_set_pwm1_max, 0);
716 666
717static struct attribute *hwmon_attributes[] = { 667static struct attribute *hwmon_attributes[] = {
718 &sensor_dev_attr_temp1_input.dev_attr.attr, 668 &sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -727,9 +677,10 @@ static struct attribute *hwmon_fan_rpm_attributes[] = {
727 NULL 677 NULL
728}; 678};
729static struct attribute *hwmon_pwm_fan_attributes[] = { 679static struct attribute *hwmon_pwm_fan_attributes[] = {
730 &sensor_dev_attr_pwm0.dev_attr.attr, 680 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
731 &sensor_dev_attr_pwm0_min.dev_attr.attr, 681 &sensor_dev_attr_pwm1.dev_attr.attr,
732 &sensor_dev_attr_pwm0_max.dev_attr.attr, 682 &sensor_dev_attr_pwm1_min.dev_attr.attr,
683 &sensor_dev_attr_pwm1_max.dev_attr.attr,
733 NULL 684 NULL
734}; 685};
735 686
@@ -747,20 +698,22 @@ static const struct attribute_group hwmon_pwm_fan_attrgroup = {
747static int 698static int
748nouveau_hwmon_init(struct drm_device *dev) 699nouveau_hwmon_init(struct drm_device *dev)
749{ 700{
750 struct drm_nouveau_private *dev_priv = dev->dev_private; 701 struct nouveau_pm *pm = nouveau_pm(dev);
751 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 702 struct nouveau_drm *drm = nouveau_drm(dev);
703 struct nouveau_therm *therm = nouveau_therm(drm->device);
704
752#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 705#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
753 struct device *hwmon_dev; 706 struct device *hwmon_dev;
754 int ret = 0; 707 int ret = 0;
755 708
756 if (!pm->temp_get) 709 if (!therm || !therm->temp_get || !therm->attr_get ||
710 !therm->attr_set || therm->temp_get(therm) < 0)
757 return -ENODEV; 711 return -ENODEV;
758 712
759 hwmon_dev = hwmon_device_register(&dev->pdev->dev); 713 hwmon_dev = hwmon_device_register(&dev->pdev->dev);
760 if (IS_ERR(hwmon_dev)) { 714 if (IS_ERR(hwmon_dev)) {
761 ret = PTR_ERR(hwmon_dev); 715 ret = PTR_ERR(hwmon_dev);
762 NV_ERROR(dev, 716 NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
763 "Unable to register hwmon device: %d\n", ret);
764 return ret; 717 return ret;
765 } 718 }
766 dev_set_drvdata(hwmon_dev, dev); 719 dev_set_drvdata(hwmon_dev, dev);
@@ -776,7 +729,7 @@ nouveau_hwmon_init(struct drm_device *dev)
776 /*XXX: incorrect, need better detection for this, some boards have 729 /*XXX: incorrect, need better detection for this, some boards have
777 * the gpio entries for pwm fan control even when there's no 730 * the gpio entries for pwm fan control even when there's no
778 * actual fan connected to it... therm table? */ 731 * actual fan connected to it... therm table? */
779 if (nouveau_pwmfan_get(dev) >= 0) { 732 if (therm->fan_get && therm->fan_get(therm) >= 0) {
780 ret = sysfs_create_group(&dev->pdev->dev.kobj, 733 ret = sysfs_create_group(&dev->pdev->dev.kobj,
781 &hwmon_pwm_fan_attrgroup); 734 &hwmon_pwm_fan_attrgroup);
782 if (ret) 735 if (ret)
@@ -784,7 +737,7 @@ nouveau_hwmon_init(struct drm_device *dev)
784 } 737 }
785 738
786 /* if the card can read the fan rpm */ 739 /* if the card can read the fan rpm */
787 if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) { 740 if (therm->fan_sense(therm) >= 0) {
788 ret = sysfs_create_group(&dev->pdev->dev.kobj, 741 ret = sysfs_create_group(&dev->pdev->dev.kobj,
789 &hwmon_fan_rpm_attrgroup); 742 &hwmon_fan_rpm_attrgroup);
790 if (ret) 743 if (ret)
@@ -796,7 +749,7 @@ nouveau_hwmon_init(struct drm_device *dev)
796 return 0; 749 return 0;
797 750
798error: 751error:
799 NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret); 752 NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
800 hwmon_device_unregister(hwmon_dev); 753 hwmon_device_unregister(hwmon_dev);
801 pm->hwmon = NULL; 754 pm->hwmon = NULL;
802 return ret; 755 return ret;
@@ -810,8 +763,7 @@ static void
810nouveau_hwmon_fini(struct drm_device *dev) 763nouveau_hwmon_fini(struct drm_device *dev)
811{ 764{
812#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 765#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
813 struct drm_nouveau_private *dev_priv = dev->dev_private; 766 struct nouveau_pm *pm = nouveau_pm(dev);
814 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
815 767
816 if (pm->hwmon) { 768 if (pm->hwmon) {
817 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); 769 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
@@ -829,16 +781,15 @@ nouveau_hwmon_fini(struct drm_device *dev)
829static int 781static int
830nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) 782nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
831{ 783{
832 struct drm_nouveau_private *dev_priv = 784 struct nouveau_pm *pm = container_of(nb, struct nouveau_pm, acpi_nb);
833 container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb); 785 struct nouveau_drm *drm = nouveau_drm(pm->dev);
834 struct drm_device *dev = dev_priv->dev;
835 struct acpi_bus_event *entry = (struct acpi_bus_event *)data; 786 struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
836 787
837 if (strcmp(entry->device_class, "ac_adapter") == 0) { 788 if (strcmp(entry->device_class, "ac_adapter") == 0) {
838 bool ac = power_supply_is_system_supplied(); 789 bool ac = power_supply_is_system_supplied();
839 790
840 NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC"); 791 NV_DEBUG(drm, "power supply changed: %s\n", ac ? "AC" : "DC");
841 nouveau_pm_trigger(dev); 792 nouveau_pm_trigger(pm->dev);
842 } 793 }
843 794
844 return NOTIFY_OK; 795 return NOTIFY_OK;
@@ -848,19 +799,67 @@ nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
848int 799int
849nouveau_pm_init(struct drm_device *dev) 800nouveau_pm_init(struct drm_device *dev)
850{ 801{
851 struct drm_nouveau_private *dev_priv = dev->dev_private; 802 struct nouveau_device *device = nouveau_dev(dev);
852 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 803 struct nouveau_drm *drm = nouveau_drm(dev);
804 struct nouveau_pm *pm;
853 char info[256]; 805 char info[256];
854 int ret, i; 806 int ret, i;
855 807
808 pm = drm->pm = kzalloc(sizeof(*pm), GFP_KERNEL);
809 if (!pm)
810 return -ENOMEM;
811
812 pm->dev = dev;
813
814 if (device->card_type < NV_40) {
815 pm->clocks_get = nv04_pm_clocks_get;
816 pm->clocks_pre = nv04_pm_clocks_pre;
817 pm->clocks_set = nv04_pm_clocks_set;
818 if (nouveau_gpio(drm->device)) {
819 pm->voltage_get = nouveau_voltage_gpio_get;
820 pm->voltage_set = nouveau_voltage_gpio_set;
821 }
822 } else
823 if (device->card_type < NV_50) {
824 pm->clocks_get = nv40_pm_clocks_get;
825 pm->clocks_pre = nv40_pm_clocks_pre;
826 pm->clocks_set = nv40_pm_clocks_set;
827 pm->voltage_get = nouveau_voltage_gpio_get;
828 pm->voltage_set = nouveau_voltage_gpio_set;
829 } else
830 if (device->card_type < NV_C0) {
831 if (device->chipset < 0xa3 ||
832 device->chipset == 0xaa ||
833 device->chipset == 0xac) {
834 pm->clocks_get = nv50_pm_clocks_get;
835 pm->clocks_pre = nv50_pm_clocks_pre;
836 pm->clocks_set = nv50_pm_clocks_set;
837 } else {
838 pm->clocks_get = nva3_pm_clocks_get;
839 pm->clocks_pre = nva3_pm_clocks_pre;
840 pm->clocks_set = nva3_pm_clocks_set;
841 }
842 pm->voltage_get = nouveau_voltage_gpio_get;
843 pm->voltage_set = nouveau_voltage_gpio_set;
844 } else
845 if (device->card_type < NV_E0) {
846 pm->clocks_get = nvc0_pm_clocks_get;
847 pm->clocks_pre = nvc0_pm_clocks_pre;
848 pm->clocks_set = nvc0_pm_clocks_set;
849 pm->voltage_get = nouveau_voltage_gpio_get;
850 pm->voltage_set = nouveau_voltage_gpio_set;
851 }
852
853
856 /* parse aux tables from vbios */ 854 /* parse aux tables from vbios */
857 nouveau_volt_init(dev); 855 nouveau_volt_init(dev);
858 nouveau_temp_init(dev); 856
857 INIT_LIST_HEAD(&pm->profiles);
859 858
860 /* determine current ("boot") performance level */ 859 /* determine current ("boot") performance level */
861 ret = nouveau_pm_perflvl_get(dev, &pm->boot); 860 ret = nouveau_pm_perflvl_get(dev, &pm->boot);
862 if (ret) { 861 if (ret) {
863 NV_ERROR(dev, "failed to determine boot perflvl\n"); 862 NV_ERROR(drm, "failed to determine boot perflvl\n");
864 return ret; 863 return ret;
865 } 864 }
866 865
@@ -868,7 +867,6 @@ nouveau_pm_init(struct drm_device *dev)
868 strncpy(pm->boot.profile.name, "boot", 4); 867 strncpy(pm->boot.profile.name, "boot", 4);
869 pm->boot.profile.func = &nouveau_pm_static_profile_func; 868 pm->boot.profile.func = &nouveau_pm_static_profile_func;
870 869
871 INIT_LIST_HEAD(&pm->profiles);
872 list_add(&pm->boot.profile.head, &pm->profiles); 870 list_add(&pm->boot.profile.head, &pm->profiles);
873 871
874 pm->profile_ac = &pm->boot.profile; 872 pm->profile_ac = &pm->boot.profile;
@@ -880,22 +878,19 @@ nouveau_pm_init(struct drm_device *dev)
880 nouveau_perf_init(dev); 878 nouveau_perf_init(dev);
881 879
882 /* display available performance levels */ 880 /* display available performance levels */
883 NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); 881 NV_INFO(drm, "%d available performance level(s)\n", pm->nr_perflvl);
884 for (i = 0; i < pm->nr_perflvl; i++) { 882 for (i = 0; i < pm->nr_perflvl; i++) {
885 nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); 883 nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
886 NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info); 884 NV_INFO(drm, "%d:%s", pm->perflvl[i].id, info);
887 } 885 }
888 886
889 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); 887 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
890 NV_INFO(dev, "c:%s", info); 888 NV_INFO(drm, "c:%s", info);
891 889
892 /* switch performance levels now if requested */ 890 /* switch performance levels now if requested */
893 if (nouveau_perflvl != NULL) 891 if (nouveau_perflvl != NULL)
894 nouveau_pm_profile_set(dev, nouveau_perflvl); 892 nouveau_pm_profile_set(dev, nouveau_perflvl);
895 893
896 /* determine the current fan speed */
897 pm->fan.percent = nouveau_pwmfan_get(dev);
898
899 nouveau_sysfs_init(dev); 894 nouveau_sysfs_init(dev);
900 nouveau_hwmon_init(dev); 895 nouveau_hwmon_init(dev);
901#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) 896#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
@@ -909,8 +904,7 @@ nouveau_pm_init(struct drm_device *dev)
909void 904void
910nouveau_pm_fini(struct drm_device *dev) 905nouveau_pm_fini(struct drm_device *dev)
911{ 906{
912 struct drm_nouveau_private *dev_priv = dev->dev_private; 907 struct nouveau_pm *pm = nouveau_pm(dev);
913 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
914 struct nouveau_pm_profile *profile, *tmp; 908 struct nouveau_pm_profile *profile, *tmp;
915 909
916 list_for_each_entry_safe(profile, tmp, &pm->profiles, head) { 910 list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
@@ -921,7 +915,6 @@ nouveau_pm_fini(struct drm_device *dev)
921 if (pm->cur != &pm->boot) 915 if (pm->cur != &pm->boot)
922 nouveau_pm_perflvl_set(dev, &pm->boot); 916 nouveau_pm_perflvl_set(dev, &pm->boot);
923 917
924 nouveau_temp_fini(dev);
925 nouveau_perf_fini(dev); 918 nouveau_perf_fini(dev);
926 nouveau_volt_fini(dev); 919 nouveau_volt_fini(dev);
927 920
@@ -930,13 +923,15 @@ nouveau_pm_fini(struct drm_device *dev)
930#endif 923#endif
931 nouveau_hwmon_fini(dev); 924 nouveau_hwmon_fini(dev);
932 nouveau_sysfs_fini(dev); 925 nouveau_sysfs_fini(dev);
926
927 nouveau_drm(dev)->pm = NULL;
928 kfree(pm);
933} 929}
934 930
935void 931void
936nouveau_pm_resume(struct drm_device *dev) 932nouveau_pm_resume(struct drm_device *dev)
937{ 933{
938 struct drm_nouveau_private *dev_priv = dev->dev_private; 934 struct nouveau_pm *pm = nouveau_pm(dev);
939 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
940 struct nouveau_pm_level *perflvl; 935 struct nouveau_pm_level *perflvl;
941 936
942 if (!pm->cur || pm->cur == &pm->boot) 937 if (!pm->cur || pm->cur == &pm->boot)
@@ -945,5 +940,4 @@ nouveau_pm_resume(struct drm_device *dev)
945 perflvl = pm->cur; 940 perflvl = pm->cur;
946 pm->cur = &pm->boot; 941 pm->cur = &pm->boot;
947 nouveau_pm_perflvl_set(dev, perflvl); 942 nouveau_pm_perflvl_set(dev, perflvl);
948 nouveau_pwmfan_set(dev, pm->fan.percent);
949} 943}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 07cac72c72b4..73b789c230a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -25,6 +25,165 @@
25#ifndef __NOUVEAU_PM_H__ 25#ifndef __NOUVEAU_PM_H__
26#define __NOUVEAU_PM_H__ 26#define __NOUVEAU_PM_H__
27 27
28#include <subdev/bios/pll.h>
29#include <subdev/clock.h>
30
31struct nouveau_pm_voltage_level {
32 u32 voltage; /* microvolts */
33 u8 vid;
34};
35
36struct nouveau_pm_voltage {
37 bool supported;
38 u8 version;
39 u8 vid_mask;
40
41 struct nouveau_pm_voltage_level *level;
42 int nr_level;
43};
44
45/* Exclusive upper limits */
46#define NV_MEM_CL_DDR2_MAX 8
47#define NV_MEM_WR_DDR2_MAX 9
48#define NV_MEM_CL_DDR3_MAX 17
49#define NV_MEM_WR_DDR3_MAX 17
50#define NV_MEM_CL_GDDR3_MAX 16
51#define NV_MEM_WR_GDDR3_MAX 18
52#define NV_MEM_CL_GDDR5_MAX 21
53#define NV_MEM_WR_GDDR5_MAX 20
54
55struct nouveau_pm_memtiming {
56 int id;
57
58 u32 reg[9];
59 u32 mr[4];
60
61 u8 tCWL;
62
63 u8 odt;
64 u8 drive_strength;
65};
66
67struct nouveau_pm_tbl_header {
68 u8 version;
69 u8 header_len;
70 u8 entry_cnt;
71 u8 entry_len;
72};
73
74struct nouveau_pm_tbl_entry {
75 u8 tWR;
76 u8 tWTR;
77 u8 tCL;
78 u8 tRC;
79 u8 empty_4;
80 u8 tRFC; /* Byte 5 */
81 u8 empty_6;
82 u8 tRAS; /* Byte 7 */
83 u8 empty_8;
84 u8 tRP; /* Byte 9 */
85 u8 tRCDRD;
86 u8 tRCDWR;
87 u8 tRRD;
88 u8 tUNK_13;
89 u8 RAM_FT1; /* 14, a bitmask of random RAM features */
90 u8 empty_15;
91 u8 tUNK_16;
92 u8 empty_17;
93 u8 tUNK_18;
94 u8 tCWL;
95 u8 tUNK_20, tUNK_21;
96};
97
98struct nouveau_pm_profile;
99struct nouveau_pm_profile_func {
100 void (*destroy)(struct nouveau_pm_profile *);
101 void (*init)(struct nouveau_pm_profile *);
102 void (*fini)(struct nouveau_pm_profile *);
103 struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
104};
105
106struct nouveau_pm_profile {
107 const struct nouveau_pm_profile_func *func;
108 struct list_head head;
109 char name[8];
110};
111
112#define NOUVEAU_PM_MAX_LEVEL 8
113struct nouveau_pm_level {
114 struct nouveau_pm_profile profile;
115 struct device_attribute dev_attr;
116 char name[32];
117 int id;
118
119 struct nouveau_pm_memtiming timing;
120 u32 memory;
121 u16 memscript;
122
123 u32 core;
124 u32 shader;
125 u32 rop;
126 u32 copy;
127 u32 daemon;
128 u32 vdec;
129 u32 dom6;
130 u32 unka0; /* nva3:nvc0 */
131 u32 hub01; /* nvc0- */
132 u32 hub06; /* nvc0- */
133 u32 hub07; /* nvc0- */
134
135 u32 volt_min; /* microvolts */
136 u32 volt_max;
137 u8 fanspeed;
138};
139
140struct nouveau_pm_temp_sensor_constants {
141 u16 offset_constant;
142 s16 offset_mult;
143 s16 offset_div;
144 s16 slope_mult;
145 s16 slope_div;
146};
147
148struct nouveau_pm_threshold_temp {
149 s16 critical;
150 s16 down_clock;
151};
152
153struct nouveau_pm {
154 struct drm_device *dev;
155
156 struct nouveau_pm_voltage voltage;
157 struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
158 int nr_perflvl;
159 struct nouveau_pm_temp_sensor_constants sensor_constants;
160 struct nouveau_pm_threshold_temp threshold_temp;
161
162 struct nouveau_pm_profile *profile_ac;
163 struct nouveau_pm_profile *profile_dc;
164 struct nouveau_pm_profile *profile;
165 struct list_head profiles;
166
167 struct nouveau_pm_level boot;
168 struct nouveau_pm_level *cur;
169
170 struct device *hwmon;
171 struct notifier_block acpi_nb;
172
173 int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
174 void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
175 int (*clocks_set)(struct drm_device *, void *);
176
177 int (*voltage_get)(struct drm_device *);
178 int (*voltage_set)(struct drm_device *, int voltage);
179};
180
181static inline struct nouveau_pm *
182nouveau_pm(struct drm_device *dev)
183{
184 return nouveau_drm(dev)->pm;
185}
186
28struct nouveau_mem_exec_func { 187struct nouveau_mem_exec_func {
29 struct drm_device *dev; 188 struct drm_device *dev;
30 void (*precharge)(struct nouveau_mem_exec_func *); 189 void (*precharge)(struct nouveau_mem_exec_func *);
@@ -99,11 +258,26 @@ int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
99void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); 258void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
100int nvc0_pm_clocks_set(struct drm_device *, void *); 259int nvc0_pm_clocks_set(struct drm_device *, void *);
101 260
102/* nouveau_temp.c */ 261/* nouveau_mem.c */
103void nouveau_temp_init(struct drm_device *dev); 262int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
104void nouveau_temp_fini(struct drm_device *dev); 263 struct nouveau_pm_memtiming *);
105void nouveau_temp_safety_checks(struct drm_device *dev); 264void nouveau_mem_timing_read(struct drm_device *,
106int nv40_temp_get(struct drm_device *dev); 265 struct nouveau_pm_memtiming *);
107int nv84_temp_get(struct drm_device *dev); 266
267static inline int
268nva3_calc_pll(struct drm_device *dev, struct nvbios_pll *pll, u32 freq,
269 int *N, int *fN, int *M, int *P)
270{
271 struct nouveau_device *device = nouveau_dev(dev);
272 struct nouveau_clock *clk = nouveau_clock(device);
273 struct nouveau_pll_vals pv;
274 int ret;
275
276 ret = clk->pll_calc(clk, pll, freq, &pv);
277 *N = pv.N1;
278 *M = pv.M1;
279 *P = pv.log2P;
280 return ret;
281}
108 282
109#endif 283#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index a25cf2cb931f..4ffa655545e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,14 +22,13 @@
22 * Authors: Dave Airlie 22 * Authors: Dave Airlie
23 */ 23 */
24 24
25#include <linux/dma-buf.h>
26
25#include "drmP.h" 27#include "drmP.h"
26#include "drm.h" 28#include "drm.h"
27 29
28#include "nouveau_drv.h"
29#include "nouveau_drm.h" 30#include "nouveau_drm.h"
30#include "nouveau_dma.h" 31#include "nouveau_gem.h"
31
32#include <linux/dma-buf.h>
33 32
34static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment, 33static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
35 enum dma_data_direction dir) 34 enum dma_data_direction dir)
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
deleted file mode 100644
index a24a81f5a89e..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ /dev/null
@@ -1,309 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29
30static u32
31nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
32{
33 struct drm_device *dev = chan->dev;
34 struct drm_nouveau_private *dev_priv = dev->dev_private;
35 struct nouveau_ramht *ramht = chan->ramht;
36 u32 hash = 0;
37 int i;
38
39 NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
40
41 for (i = 32; i > 0; i -= ramht->bits) {
42 hash ^= (handle & ((1 << ramht->bits) - 1));
43 handle >>= ramht->bits;
44 }
45
46 if (dev_priv->card_type < NV_50)
47 hash ^= chan->id << (ramht->bits - 4);
48 hash <<= 3;
49
50 NV_DEBUG(dev, "hash=0x%08x\n", hash);
51 return hash;
52}
53
54static int
55nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
56 u32 offset)
57{
58 struct drm_nouveau_private *dev_priv = dev->dev_private;
59 u32 ctx = nv_ro32(ramht, offset + 4);
60
61 if (dev_priv->card_type < NV_40)
62 return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
63 return (ctx != 0);
64}
65
66static int
67nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
68 struct nouveau_gpuobj *ramht, u32 offset)
69{
70 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
71 u32 ctx = nv_ro32(ramht, offset + 4);
72
73 if (dev_priv->card_type >= NV_50)
74 return true;
75 else if (dev_priv->card_type >= NV_40)
76 return chan->id ==
77 ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
78 else
79 return chan->id ==
80 ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
81}
82
83int
84nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
85 struct nouveau_gpuobj *gpuobj)
86{
87 struct drm_device *dev = chan->dev;
88 struct drm_nouveau_private *dev_priv = dev->dev_private;
89 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
90 struct nouveau_ramht_entry *entry;
91 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
92 unsigned long flags;
93 u32 ctx, co, ho;
94
95 if (nouveau_ramht_find(chan, handle))
96 return -EEXIST;
97
98 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
99 if (!entry)
100 return -ENOMEM;
101 entry->channel = chan;
102 entry->gpuobj = NULL;
103 entry->handle = handle;
104 nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
105
106 if (dev_priv->card_type < NV_40) {
107 ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
108 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
109 (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
110 } else
111 if (dev_priv->card_type < NV_50) {
112 ctx = (gpuobj->pinst >> 4) |
113 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
114 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
115 } else {
116 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
117 ctx = (gpuobj->cinst << 10) |
118 (chan->id << 28) |
119 chan->id; /* HASH_TAG */
120 } else {
121 ctx = (gpuobj->cinst >> 4) |
122 ((gpuobj->engine <<
123 NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
124 }
125 }
126
127 spin_lock_irqsave(&chan->ramht->lock, flags);
128 list_add(&entry->head, &chan->ramht->entries);
129
130 co = ho = nouveau_ramht_hash_handle(chan, handle);
131 do {
132 if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
133 NV_DEBUG(dev,
134 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
135 chan->id, co, handle, ctx);
136 nv_wo32(ramht, co + 0, handle);
137 nv_wo32(ramht, co + 4, ctx);
138
139 spin_unlock_irqrestore(&chan->ramht->lock, flags);
140 instmem->flush(dev);
141 return 0;
142 }
143 NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
144 chan->id, co, nv_ro32(ramht, co));
145
146 co += 8;
147 if (co >= ramht->size)
148 co = 0;
149 } while (co != ho);
150
151 NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
152 list_del(&entry->head);
153 spin_unlock_irqrestore(&chan->ramht->lock, flags);
154 kfree(entry);
155 return -ENOMEM;
156}
157
158static struct nouveau_ramht_entry *
159nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
160{
161 struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
162 struct nouveau_ramht_entry *entry;
163 unsigned long flags;
164
165 if (!ramht)
166 return NULL;
167
168 spin_lock_irqsave(&ramht->lock, flags);
169 list_for_each_entry(entry, &ramht->entries, head) {
170 if (entry->channel == chan &&
171 (!handle || entry->handle == handle)) {
172 list_del(&entry->head);
173 spin_unlock_irqrestore(&ramht->lock, flags);
174
175 return entry;
176 }
177 }
178 spin_unlock_irqrestore(&ramht->lock, flags);
179
180 return NULL;
181}
182
183static void
184nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
185{
186 struct drm_device *dev = chan->dev;
187 struct drm_nouveau_private *dev_priv = dev->dev_private;
188 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
189 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
190 unsigned long flags;
191 u32 co, ho;
192
193 spin_lock_irqsave(&chan->ramht->lock, flags);
194 co = ho = nouveau_ramht_hash_handle(chan, handle);
195 do {
196 if (nouveau_ramht_entry_valid(dev, ramht, co) &&
197 nouveau_ramht_entry_same_channel(chan, ramht, co) &&
198 (handle == nv_ro32(ramht, co))) {
199 NV_DEBUG(dev,
200 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
201 chan->id, co, handle, nv_ro32(ramht, co + 4));
202 nv_wo32(ramht, co + 0, 0x00000000);
203 nv_wo32(ramht, co + 4, 0x00000000);
204 instmem->flush(dev);
205 goto out;
206 }
207
208 co += 8;
209 if (co >= ramht->size)
210 co = 0;
211 } while (co != ho);
212
213 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
214 chan->id, handle);
215out:
216 spin_unlock_irqrestore(&chan->ramht->lock, flags);
217}
218
219int
220nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
221{
222 struct nouveau_ramht_entry *entry;
223
224 entry = nouveau_ramht_remove_entry(chan, handle);
225 if (!entry)
226 return -ENOENT;
227
228 nouveau_ramht_remove_hash(chan, entry->handle);
229 nouveau_gpuobj_ref(NULL, &entry->gpuobj);
230 kfree(entry);
231 return 0;
232}
233
234struct nouveau_gpuobj *
235nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
236{
237 struct nouveau_ramht *ramht = chan->ramht;
238 struct nouveau_ramht_entry *entry;
239 struct nouveau_gpuobj *gpuobj = NULL;
240 unsigned long flags;
241
242 if (unlikely(!chan->ramht))
243 return NULL;
244
245 spin_lock_irqsave(&ramht->lock, flags);
246 list_for_each_entry(entry, &chan->ramht->entries, head) {
247 if (entry->channel == chan && entry->handle == handle) {
248 gpuobj = entry->gpuobj;
249 break;
250 }
251 }
252 spin_unlock_irqrestore(&ramht->lock, flags);
253
254 return gpuobj;
255}
256
257int
258nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
259 struct nouveau_ramht **pramht)
260{
261 struct nouveau_ramht *ramht;
262
263 ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
264 if (!ramht)
265 return -ENOMEM;
266
267 ramht->dev = dev;
268 kref_init(&ramht->refcount);
269 ramht->bits = drm_order(gpuobj->size / 8);
270 INIT_LIST_HEAD(&ramht->entries);
271 spin_lock_init(&ramht->lock);
272 nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
273
274 *pramht = ramht;
275 return 0;
276}
277
278static void
279nouveau_ramht_del(struct kref *ref)
280{
281 struct nouveau_ramht *ramht =
282 container_of(ref, struct nouveau_ramht, refcount);
283
284 nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
285 kfree(ramht);
286}
287
288void
289nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
290 struct nouveau_channel *chan)
291{
292 struct nouveau_ramht_entry *entry;
293 struct nouveau_ramht *ramht;
294
295 if (ref)
296 kref_get(&ref->refcount);
297
298 ramht = *ptr;
299 if (ramht) {
300 while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
301 nouveau_ramht_remove_hash(chan, entry->handle);
302 nouveau_gpuobj_ref(NULL, &entry->gpuobj);
303 kfree(entry);
304 }
305
306 kref_put(&ramht->refcount, nouveau_ramht_del);
307 }
308 *ptr = ref;
309}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 38483a042bc2..ca5492ac2da5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,11 +1,10 @@
1#include "drmP.h"
2#include "nouveau_drv.h"
3#include <linux/pagemap.h> 1#include <linux/pagemap.h>
4#include <linux/slab.h> 2#include <linux/slab.h>
5 3
6#define NV_CTXDMA_PAGE_SHIFT 12 4#include <subdev/fb.h>
7#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) 5
8#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) 6#include "nouveau_drm.h"
7#include "nouveau_ttm.h"
9 8
10struct nouveau_sgdma_be { 9struct nouveau_sgdma_be {
11 /* this has to be the first field so populate/unpopulated in 10 /* this has to be the first field so populate/unpopulated in
@@ -13,7 +12,7 @@ struct nouveau_sgdma_be {
13 */ 12 */
14 struct ttm_dma_tt ttm; 13 struct ttm_dma_tt ttm;
15 struct drm_device *dev; 14 struct drm_device *dev;
16 u64 offset; 15 struct nouveau_mem *node;
17}; 16};
18 17
19static void 18static void
@@ -22,7 +21,6 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
22 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; 21 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
23 22
24 if (ttm) { 23 if (ttm) {
25 NV_DEBUG(nvbe->dev, "\n");
26 ttm_dma_tt_fini(&nvbe->ttm); 24 ttm_dma_tt_fini(&nvbe->ttm);
27 kfree(nvbe); 25 kfree(nvbe);
28 } 26 }
@@ -32,25 +30,18 @@ static int
32nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) 30nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
33{ 31{
34 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; 32 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
35 struct drm_device *dev = nvbe->dev; 33 struct nouveau_mem *node = mem->mm_node;
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 34 u64 size = mem->num_pages << 12;
37 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
38 unsigned i, j, pte;
39
40 NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
41
42 nvbe->offset = mem->start << PAGE_SHIFT;
43 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
44 for (i = 0; i < ttm->num_pages; i++) {
45 dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
46 uint32_t offset_l = lower_32_bits(dma_offset);
47 35
48 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { 36 if (ttm->sg) {
49 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); 37 node->sg = ttm->sg;
50 offset_l += NV_CTXDMA_PAGE_SIZE; 38 nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
51 } 39 } else {
40 node->pages = nvbe->ttm.dma_address;
41 nouveau_vm_map_sg(&node->vma[0], 0, size, node);
52 } 42 }
53 43
44 nvbe->node = node;
54 return 0; 45 return 0;
55} 46}
56 47
@@ -58,22 +49,7 @@ static int
58nv04_sgdma_unbind(struct ttm_tt *ttm) 49nv04_sgdma_unbind(struct ttm_tt *ttm)
59{ 50{
60 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; 51 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
61 struct drm_device *dev = nvbe->dev; 52 nouveau_vm_unmap(&nvbe->node->vma[0]);
62 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
64 unsigned i, j, pte;
65
66 NV_DEBUG(dev, "\n");
67
68 if (ttm->state != tt_bound)
69 return 0;
70
71 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
72 for (i = 0; i < ttm->num_pages; i++) {
73 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
74 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
75 }
76
77 return 0; 53 return 0;
78} 54}
79 55
@@ -83,206 +59,6 @@ static struct ttm_backend_func nv04_sgdma_backend = {
83 .destroy = nouveau_sgdma_destroy 59 .destroy = nouveau_sgdma_destroy
84}; 60};
85 61
86static void
87nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
88{
89 struct drm_device *dev = nvbe->dev;
90
91 nv_wr32(dev, 0x100810, 0x00000022);
92 if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
93 NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
94 nv_rd32(dev, 0x100810));
95 nv_wr32(dev, 0x100810, 0x00000000);
96}
97
98static int
99nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
100{
101 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
102 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
103 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
104 dma_addr_t *list = nvbe->ttm.dma_address;
105 u32 pte = mem->start << 2;
106 u32 cnt = ttm->num_pages;
107
108 nvbe->offset = mem->start << PAGE_SHIFT;
109
110 while (cnt--) {
111 nv_wo32(pgt, pte, (*list++ >> 7) | 1);
112 pte += 4;
113 }
114
115 nv41_sgdma_flush(nvbe);
116 return 0;
117}
118
119static int
120nv41_sgdma_unbind(struct ttm_tt *ttm)
121{
122 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
123 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
124 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
125 u32 pte = (nvbe->offset >> 12) << 2;
126 u32 cnt = ttm->num_pages;
127
128 while (cnt--) {
129 nv_wo32(pgt, pte, 0x00000000);
130 pte += 4;
131 }
132
133 nv41_sgdma_flush(nvbe);
134 return 0;
135}
136
137static struct ttm_backend_func nv41_sgdma_backend = {
138 .bind = nv41_sgdma_bind,
139 .unbind = nv41_sgdma_unbind,
140 .destroy = nouveau_sgdma_destroy
141};
142
143static void
144nv44_sgdma_flush(struct ttm_tt *ttm)
145{
146 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
147 struct drm_device *dev = nvbe->dev;
148
149 nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
150 nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
151 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
152 NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
153 nv_rd32(dev, 0x100808));
154 nv_wr32(dev, 0x100808, 0x00000000);
155}
156
157static void
158nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
159{
160 struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
161 dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
162 u32 pte, tmp[4];
163
164 pte = base >> 2;
165 base &= ~0x0000000f;
166
167 tmp[0] = nv_ro32(pgt, base + 0x0);
168 tmp[1] = nv_ro32(pgt, base + 0x4);
169 tmp[2] = nv_ro32(pgt, base + 0x8);
170 tmp[3] = nv_ro32(pgt, base + 0xc);
171 while (cnt--) {
172 u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
173 switch (pte++ & 0x3) {
174 case 0:
175 tmp[0] &= ~0x07ffffff;
176 tmp[0] |= addr;
177 break;
178 case 1:
179 tmp[0] &= ~0xf8000000;
180 tmp[0] |= addr << 27;
181 tmp[1] &= ~0x003fffff;
182 tmp[1] |= addr >> 5;
183 break;
184 case 2:
185 tmp[1] &= ~0xffc00000;
186 tmp[1] |= addr << 22;
187 tmp[2] &= ~0x0001ffff;
188 tmp[2] |= addr >> 10;
189 break;
190 case 3:
191 tmp[2] &= ~0xfffe0000;
192 tmp[2] |= addr << 17;
193 tmp[3] &= ~0x00000fff;
194 tmp[3] |= addr >> 15;
195 break;
196 }
197 }
198
199 tmp[3] |= 0x40000000;
200
201 nv_wo32(pgt, base + 0x0, tmp[0]);
202 nv_wo32(pgt, base + 0x4, tmp[1]);
203 nv_wo32(pgt, base + 0x8, tmp[2]);
204 nv_wo32(pgt, base + 0xc, tmp[3]);
205}
206
207static int
208nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
209{
210 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
211 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
212 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
213 dma_addr_t *list = nvbe->ttm.dma_address;
214 u32 pte = mem->start << 2, tmp[4];
215 u32 cnt = ttm->num_pages;
216 int i;
217
218 nvbe->offset = mem->start << PAGE_SHIFT;
219
220 if (pte & 0x0000000c) {
221 u32 max = 4 - ((pte >> 2) & 0x3);
222 u32 part = (cnt > max) ? max : cnt;
223 nv44_sgdma_fill(pgt, list, pte, part);
224 pte += (part << 2);
225 list += part;
226 cnt -= part;
227 }
228
229 while (cnt >= 4) {
230 for (i = 0; i < 4; i++)
231 tmp[i] = *list++ >> 12;
232 nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
233 nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
234 nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
235 nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
236 pte += 0x10;
237 cnt -= 4;
238 }
239
240 if (cnt)
241 nv44_sgdma_fill(pgt, list, pte, cnt);
242
243 nv44_sgdma_flush(ttm);
244 return 0;
245}
246
247static int
248nv44_sgdma_unbind(struct ttm_tt *ttm)
249{
250 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
251 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
252 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
253 u32 pte = (nvbe->offset >> 12) << 2;
254 u32 cnt = ttm->num_pages;
255
256 if (pte & 0x0000000c) {
257 u32 max = 4 - ((pte >> 2) & 0x3);
258 u32 part = (cnt > max) ? max : cnt;
259 nv44_sgdma_fill(pgt, NULL, pte, part);
260 pte += (part << 2);
261 cnt -= part;
262 }
263
264 while (cnt >= 4) {
265 nv_wo32(pgt, pte + 0x0, 0x00000000);
266 nv_wo32(pgt, pte + 0x4, 0x00000000);
267 nv_wo32(pgt, pte + 0x8, 0x00000000);
268 nv_wo32(pgt, pte + 0xc, 0x00000000);
269 pte += 0x10;
270 cnt -= 4;
271 }
272
273 if (cnt)
274 nv44_sgdma_fill(pgt, NULL, pte, cnt);
275
276 nv44_sgdma_flush(ttm);
277 return 0;
278}
279
280static struct ttm_backend_func nv44_sgdma_backend = {
281 .bind = nv44_sgdma_bind,
282 .unbind = nv44_sgdma_unbind,
283 .destroy = nouveau_sgdma_destroy
284};
285
286static int 62static int
287nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) 63nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
288{ 64{
@@ -315,16 +91,18 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
315 unsigned long size, uint32_t page_flags, 91 unsigned long size, uint32_t page_flags,
316 struct page *dummy_read_page) 92 struct page *dummy_read_page)
317{ 93{
318 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 94 struct nouveau_drm *drm = nouveau_bdev(bdev);
319 struct drm_device *dev = dev_priv->dev;
320 struct nouveau_sgdma_be *nvbe; 95 struct nouveau_sgdma_be *nvbe;
321 96
322 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); 97 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
323 if (!nvbe) 98 if (!nvbe)
324 return NULL; 99 return NULL;
325 100
326 nvbe->dev = dev; 101 nvbe->dev = drm->dev;
327 nvbe->ttm.ttm.func = dev_priv->gart_info.func; 102 if (nv_device(drm->device)->card_type < NV_50)
103 nvbe->ttm.ttm.func = &nv04_sgdma_backend;
104 else
105 nvbe->ttm.ttm.func = &nv50_sgdma_backend;
328 106
329 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) { 107 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
330 kfree(nvbe); 108 kfree(nvbe);
@@ -332,116 +110,3 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
332 } 110 }
333 return &nvbe->ttm.ttm; 111 return &nvbe->ttm.ttm;
334} 112}
335
336int
337nouveau_sgdma_init(struct drm_device *dev)
338{
339 struct drm_nouveau_private *dev_priv = dev->dev_private;
340 struct nouveau_gpuobj *gpuobj = NULL;
341 u32 aper_size, align;
342 int ret;
343
344 if (dev_priv->card_type >= NV_40)
345 aper_size = 512 * 1024 * 1024;
346 else
347 aper_size = 128 * 1024 * 1024;
348
349 /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
350 * christmas. The cards before it have them, the cards after
351 * it have them, why is NV44 so unloved?
352 */
353 dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
354 if (!dev_priv->gart_info.dummy.page)
355 return -ENOMEM;
356
357 dev_priv->gart_info.dummy.addr =
358 pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
359 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
360 if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
361 NV_ERROR(dev, "error mapping dummy page\n");
362 __free_page(dev_priv->gart_info.dummy.page);
363 dev_priv->gart_info.dummy.page = NULL;
364 return -ENOMEM;
365 }
366
367 if (dev_priv->card_type >= NV_50) {
368 dev_priv->gart_info.aper_base = 0;
369 dev_priv->gart_info.aper_size = aper_size;
370 dev_priv->gart_info.type = NOUVEAU_GART_HW;
371 dev_priv->gart_info.func = &nv50_sgdma_backend;
372 } else
373 if (0 && pci_is_pcie(dev->pdev) &&
374 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
375 if (nv44_graph_class(dev)) {
376 dev_priv->gart_info.func = &nv44_sgdma_backend;
377 align = 512 * 1024;
378 } else {
379 dev_priv->gart_info.func = &nv41_sgdma_backend;
380 align = 16;
381 }
382
383 ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
384 NVOBJ_FLAG_ZERO_ALLOC |
385 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
386 if (ret) {
387 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
388 return ret;
389 }
390
391 dev_priv->gart_info.sg_ctxdma = gpuobj;
392 dev_priv->gart_info.aper_base = 0;
393 dev_priv->gart_info.aper_size = aper_size;
394 dev_priv->gart_info.type = NOUVEAU_GART_HW;
395 } else {
396 ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
397 NVOBJ_FLAG_ZERO_ALLOC |
398 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
399 if (ret) {
400 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
401 return ret;
402 }
403
404 nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
405 (1 << 12) /* PT present */ |
406 (0 << 13) /* PT *not* linear */ |
407 (0 << 14) /* RW */ |
408 (2 << 16) /* PCI */);
409 nv_wo32(gpuobj, 4, aper_size - 1);
410
411 dev_priv->gart_info.sg_ctxdma = gpuobj;
412 dev_priv->gart_info.aper_base = 0;
413 dev_priv->gart_info.aper_size = aper_size;
414 dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
415 dev_priv->gart_info.func = &nv04_sgdma_backend;
416 }
417
418 return 0;
419}
420
421void
422nouveau_sgdma_takedown(struct drm_device *dev)
423{
424 struct drm_nouveau_private *dev_priv = dev->dev_private;
425
426 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
427
428 if (dev_priv->gart_info.dummy.page) {
429 pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
430 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
431 __free_page(dev_priv->gart_info.dummy.page);
432 dev_priv->gart_info.dummy.page = NULL;
433 }
434}
435
436uint32_t
437nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
438{
439 struct drm_nouveau_private *dev_priv = dev->dev_private;
440 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
441 int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
442
443 BUG_ON(dev_priv->card_type >= NV_50);
444
445 return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
446 (offset & NV_CTXDMA_PAGE_MASK);
447}
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
deleted file mode 100644
index 709e5ac680ec..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_software.h
+++ /dev/null
@@ -1,56 +0,0 @@
1#ifndef __NOUVEAU_SOFTWARE_H__
2#define __NOUVEAU_SOFTWARE_H__
3
4struct nouveau_software_priv {
5 struct nouveau_exec_engine base;
6 struct list_head vblank;
7 spinlock_t peephole_lock;
8};
9
10struct nouveau_software_chan {
11 struct list_head flip;
12 struct {
13 struct list_head list;
14 u32 channel;
15 u32 ctxdma;
16 u32 offset;
17 u32 value;
18 u32 head;
19 } vblank;
20};
21
22static inline void
23nouveau_software_context_new(struct nouveau_software_chan *pch)
24{
25 INIT_LIST_HEAD(&pch->flip);
26 INIT_LIST_HEAD(&pch->vblank.list);
27}
28
29static inline void
30nouveau_software_create(struct nouveau_software_priv *psw)
31{
32 INIT_LIST_HEAD(&psw->vblank);
33 spin_lock_init(&psw->peephole_lock);
34}
35
36static inline u16
37nouveau_software_class(struct drm_device *dev)
38{
39 struct drm_nouveau_private *dev_priv = dev->dev_private;
40 if (dev_priv->card_type <= NV_04)
41 return 0x006e;
42 if (dev_priv->card_type <= NV_40)
43 return 0x016e;
44 if (dev_priv->card_type <= NV_50)
45 return 0x506e;
46 if (dev_priv->card_type <= NV_E0)
47 return 0x906e;
48 return 0x0000;
49}
50
51int nv04_software_create(struct drm_device *);
52int nv50_software_create(struct drm_device *);
53int nvc0_software_create(struct drm_device *);
54u64 nvc0_software_crtc(struct nouveau_channel *, int crtc);
55
56#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
deleted file mode 100644
index c61014442aa9..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ /dev/null
@@ -1,1306 +0,0 @@
1/*
2 * Copyright 2005 Stephane Marchesin
3 * Copyright 2008 Stuart Bennett
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include <linux/swab.h>
27#include <linux/slab.h>
28#include "drmP.h"
29#include "drm.h"
30#include "drm_sarea.h"
31#include "drm_crtc_helper.h"
32#include <linux/vgaarb.h>
33#include <linux/vga_switcheroo.h>
34
35#include "nouveau_drv.h"
36#include "nouveau_drm.h"
37#include "nouveau_fbcon.h"
38#include "nouveau_ramht.h"
39#include "nouveau_gpio.h"
40#include "nouveau_pm.h"
41#include "nv50_display.h"
42#include "nouveau_fifo.h"
43#include "nouveau_fence.h"
44#include "nouveau_software.h"
45
46static void nouveau_stub_takedown(struct drm_device *dev) {}
47static int nouveau_stub_init(struct drm_device *dev) { return 0; }
48
49static int nouveau_init_engine_ptrs(struct drm_device *dev)
50{
51 struct drm_nouveau_private *dev_priv = dev->dev_private;
52 struct nouveau_engine *engine = &dev_priv->engine;
53
54 switch (dev_priv->chipset & 0xf0) {
55 case 0x00:
56 engine->instmem.init = nv04_instmem_init;
57 engine->instmem.takedown = nv04_instmem_takedown;
58 engine->instmem.suspend = nv04_instmem_suspend;
59 engine->instmem.resume = nv04_instmem_resume;
60 engine->instmem.get = nv04_instmem_get;
61 engine->instmem.put = nv04_instmem_put;
62 engine->instmem.map = nv04_instmem_map;
63 engine->instmem.unmap = nv04_instmem_unmap;
64 engine->instmem.flush = nv04_instmem_flush;
65 engine->mc.init = nv04_mc_init;
66 engine->mc.takedown = nv04_mc_takedown;
67 engine->timer.init = nv04_timer_init;
68 engine->timer.read = nv04_timer_read;
69 engine->timer.takedown = nv04_timer_takedown;
70 engine->fb.init = nv04_fb_init;
71 engine->fb.takedown = nv04_fb_takedown;
72 engine->display.early_init = nv04_display_early_init;
73 engine->display.late_takedown = nv04_display_late_takedown;
74 engine->display.create = nv04_display_create;
75 engine->display.destroy = nv04_display_destroy;
76 engine->display.init = nv04_display_init;
77 engine->display.fini = nv04_display_fini;
78 engine->pm.clocks_get = nv04_pm_clocks_get;
79 engine->pm.clocks_pre = nv04_pm_clocks_pre;
80 engine->pm.clocks_set = nv04_pm_clocks_set;
81 engine->vram.init = nv04_fb_vram_init;
82 engine->vram.takedown = nouveau_stub_takedown;
83 engine->vram.flags_valid = nouveau_mem_flags_valid;
84 break;
85 case 0x10:
86 engine->instmem.init = nv04_instmem_init;
87 engine->instmem.takedown = nv04_instmem_takedown;
88 engine->instmem.suspend = nv04_instmem_suspend;
89 engine->instmem.resume = nv04_instmem_resume;
90 engine->instmem.get = nv04_instmem_get;
91 engine->instmem.put = nv04_instmem_put;
92 engine->instmem.map = nv04_instmem_map;
93 engine->instmem.unmap = nv04_instmem_unmap;
94 engine->instmem.flush = nv04_instmem_flush;
95 engine->mc.init = nv04_mc_init;
96 engine->mc.takedown = nv04_mc_takedown;
97 engine->timer.init = nv04_timer_init;
98 engine->timer.read = nv04_timer_read;
99 engine->timer.takedown = nv04_timer_takedown;
100 engine->fb.init = nv10_fb_init;
101 engine->fb.takedown = nv10_fb_takedown;
102 engine->fb.init_tile_region = nv10_fb_init_tile_region;
103 engine->fb.set_tile_region = nv10_fb_set_tile_region;
104 engine->fb.free_tile_region = nv10_fb_free_tile_region;
105 engine->display.early_init = nv04_display_early_init;
106 engine->display.late_takedown = nv04_display_late_takedown;
107 engine->display.create = nv04_display_create;
108 engine->display.destroy = nv04_display_destroy;
109 engine->display.init = nv04_display_init;
110 engine->display.fini = nv04_display_fini;
111 engine->gpio.drive = nv10_gpio_drive;
112 engine->gpio.sense = nv10_gpio_sense;
113 engine->pm.clocks_get = nv04_pm_clocks_get;
114 engine->pm.clocks_pre = nv04_pm_clocks_pre;
115 engine->pm.clocks_set = nv04_pm_clocks_set;
116 if (dev_priv->chipset == 0x1a ||
117 dev_priv->chipset == 0x1f)
118 engine->vram.init = nv1a_fb_vram_init;
119 else
120 engine->vram.init = nv10_fb_vram_init;
121 engine->vram.takedown = nouveau_stub_takedown;
122 engine->vram.flags_valid = nouveau_mem_flags_valid;
123 break;
124 case 0x20:
125 engine->instmem.init = nv04_instmem_init;
126 engine->instmem.takedown = nv04_instmem_takedown;
127 engine->instmem.suspend = nv04_instmem_suspend;
128 engine->instmem.resume = nv04_instmem_resume;
129 engine->instmem.get = nv04_instmem_get;
130 engine->instmem.put = nv04_instmem_put;
131 engine->instmem.map = nv04_instmem_map;
132 engine->instmem.unmap = nv04_instmem_unmap;
133 engine->instmem.flush = nv04_instmem_flush;
134 engine->mc.init = nv04_mc_init;
135 engine->mc.takedown = nv04_mc_takedown;
136 engine->timer.init = nv04_timer_init;
137 engine->timer.read = nv04_timer_read;
138 engine->timer.takedown = nv04_timer_takedown;
139 engine->fb.init = nv20_fb_init;
140 engine->fb.takedown = nv20_fb_takedown;
141 engine->fb.init_tile_region = nv20_fb_init_tile_region;
142 engine->fb.set_tile_region = nv20_fb_set_tile_region;
143 engine->fb.free_tile_region = nv20_fb_free_tile_region;
144 engine->display.early_init = nv04_display_early_init;
145 engine->display.late_takedown = nv04_display_late_takedown;
146 engine->display.create = nv04_display_create;
147 engine->display.destroy = nv04_display_destroy;
148 engine->display.init = nv04_display_init;
149 engine->display.fini = nv04_display_fini;
150 engine->gpio.drive = nv10_gpio_drive;
151 engine->gpio.sense = nv10_gpio_sense;
152 engine->pm.clocks_get = nv04_pm_clocks_get;
153 engine->pm.clocks_pre = nv04_pm_clocks_pre;
154 engine->pm.clocks_set = nv04_pm_clocks_set;
155 engine->vram.init = nv20_fb_vram_init;
156 engine->vram.takedown = nouveau_stub_takedown;
157 engine->vram.flags_valid = nouveau_mem_flags_valid;
158 break;
159 case 0x30:
160 engine->instmem.init = nv04_instmem_init;
161 engine->instmem.takedown = nv04_instmem_takedown;
162 engine->instmem.suspend = nv04_instmem_suspend;
163 engine->instmem.resume = nv04_instmem_resume;
164 engine->instmem.get = nv04_instmem_get;
165 engine->instmem.put = nv04_instmem_put;
166 engine->instmem.map = nv04_instmem_map;
167 engine->instmem.unmap = nv04_instmem_unmap;
168 engine->instmem.flush = nv04_instmem_flush;
169 engine->mc.init = nv04_mc_init;
170 engine->mc.takedown = nv04_mc_takedown;
171 engine->timer.init = nv04_timer_init;
172 engine->timer.read = nv04_timer_read;
173 engine->timer.takedown = nv04_timer_takedown;
174 engine->fb.init = nv30_fb_init;
175 engine->fb.takedown = nv30_fb_takedown;
176 engine->fb.init_tile_region = nv30_fb_init_tile_region;
177 engine->fb.set_tile_region = nv10_fb_set_tile_region;
178 engine->fb.free_tile_region = nv30_fb_free_tile_region;
179 engine->display.early_init = nv04_display_early_init;
180 engine->display.late_takedown = nv04_display_late_takedown;
181 engine->display.create = nv04_display_create;
182 engine->display.destroy = nv04_display_destroy;
183 engine->display.init = nv04_display_init;
184 engine->display.fini = nv04_display_fini;
185 engine->gpio.drive = nv10_gpio_drive;
186 engine->gpio.sense = nv10_gpio_sense;
187 engine->pm.clocks_get = nv04_pm_clocks_get;
188 engine->pm.clocks_pre = nv04_pm_clocks_pre;
189 engine->pm.clocks_set = nv04_pm_clocks_set;
190 engine->pm.voltage_get = nouveau_voltage_gpio_get;
191 engine->pm.voltage_set = nouveau_voltage_gpio_set;
192 engine->vram.init = nv20_fb_vram_init;
193 engine->vram.takedown = nouveau_stub_takedown;
194 engine->vram.flags_valid = nouveau_mem_flags_valid;
195 break;
196 case 0x40:
197 case 0x60:
198 engine->instmem.init = nv04_instmem_init;
199 engine->instmem.takedown = nv04_instmem_takedown;
200 engine->instmem.suspend = nv04_instmem_suspend;
201 engine->instmem.resume = nv04_instmem_resume;
202 engine->instmem.get = nv04_instmem_get;
203 engine->instmem.put = nv04_instmem_put;
204 engine->instmem.map = nv04_instmem_map;
205 engine->instmem.unmap = nv04_instmem_unmap;
206 engine->instmem.flush = nv04_instmem_flush;
207 engine->mc.init = nv40_mc_init;
208 engine->mc.takedown = nv40_mc_takedown;
209 engine->timer.init = nv04_timer_init;
210 engine->timer.read = nv04_timer_read;
211 engine->timer.takedown = nv04_timer_takedown;
212 engine->fb.init = nv40_fb_init;
213 engine->fb.takedown = nv40_fb_takedown;
214 engine->fb.init_tile_region = nv30_fb_init_tile_region;
215 engine->fb.set_tile_region = nv40_fb_set_tile_region;
216 engine->fb.free_tile_region = nv30_fb_free_tile_region;
217 engine->display.early_init = nv04_display_early_init;
218 engine->display.late_takedown = nv04_display_late_takedown;
219 engine->display.create = nv04_display_create;
220 engine->display.destroy = nv04_display_destroy;
221 engine->display.init = nv04_display_init;
222 engine->display.fini = nv04_display_fini;
223 engine->gpio.init = nv10_gpio_init;
224 engine->gpio.fini = nv10_gpio_fini;
225 engine->gpio.drive = nv10_gpio_drive;
226 engine->gpio.sense = nv10_gpio_sense;
227 engine->gpio.irq_enable = nv10_gpio_irq_enable;
228 engine->pm.clocks_get = nv40_pm_clocks_get;
229 engine->pm.clocks_pre = nv40_pm_clocks_pre;
230 engine->pm.clocks_set = nv40_pm_clocks_set;
231 engine->pm.voltage_get = nouveau_voltage_gpio_get;
232 engine->pm.voltage_set = nouveau_voltage_gpio_set;
233 engine->pm.temp_get = nv40_temp_get;
234 engine->pm.pwm_get = nv40_pm_pwm_get;
235 engine->pm.pwm_set = nv40_pm_pwm_set;
236 engine->vram.init = nv40_fb_vram_init;
237 engine->vram.takedown = nouveau_stub_takedown;
238 engine->vram.flags_valid = nouveau_mem_flags_valid;
239 break;
240 case 0x50:
241 case 0x80: /* gotta love NVIDIA's consistency.. */
242 case 0x90:
243 case 0xa0:
244 engine->instmem.init = nv50_instmem_init;
245 engine->instmem.takedown = nv50_instmem_takedown;
246 engine->instmem.suspend = nv50_instmem_suspend;
247 engine->instmem.resume = nv50_instmem_resume;
248 engine->instmem.get = nv50_instmem_get;
249 engine->instmem.put = nv50_instmem_put;
250 engine->instmem.map = nv50_instmem_map;
251 engine->instmem.unmap = nv50_instmem_unmap;
252 if (dev_priv->chipset == 0x50)
253 engine->instmem.flush = nv50_instmem_flush;
254 else
255 engine->instmem.flush = nv84_instmem_flush;
256 engine->mc.init = nv50_mc_init;
257 engine->mc.takedown = nv50_mc_takedown;
258 engine->timer.init = nv04_timer_init;
259 engine->timer.read = nv04_timer_read;
260 engine->timer.takedown = nv04_timer_takedown;
261 engine->fb.init = nv50_fb_init;
262 engine->fb.takedown = nv50_fb_takedown;
263 engine->display.early_init = nv50_display_early_init;
264 engine->display.late_takedown = nv50_display_late_takedown;
265 engine->display.create = nv50_display_create;
266 engine->display.destroy = nv50_display_destroy;
267 engine->display.init = nv50_display_init;
268 engine->display.fini = nv50_display_fini;
269 engine->gpio.init = nv50_gpio_init;
270 engine->gpio.fini = nv50_gpio_fini;
271 engine->gpio.drive = nv50_gpio_drive;
272 engine->gpio.sense = nv50_gpio_sense;
273 engine->gpio.irq_enable = nv50_gpio_irq_enable;
274 switch (dev_priv->chipset) {
275 case 0x84:
276 case 0x86:
277 case 0x92:
278 case 0x94:
279 case 0x96:
280 case 0x98:
281 case 0xa0:
282 case 0xaa:
283 case 0xac:
284 case 0x50:
285 engine->pm.clocks_get = nv50_pm_clocks_get;
286 engine->pm.clocks_pre = nv50_pm_clocks_pre;
287 engine->pm.clocks_set = nv50_pm_clocks_set;
288 break;
289 default:
290 engine->pm.clocks_get = nva3_pm_clocks_get;
291 engine->pm.clocks_pre = nva3_pm_clocks_pre;
292 engine->pm.clocks_set = nva3_pm_clocks_set;
293 break;
294 }
295 engine->pm.voltage_get = nouveau_voltage_gpio_get;
296 engine->pm.voltage_set = nouveau_voltage_gpio_set;
297 if (dev_priv->chipset >= 0x84)
298 engine->pm.temp_get = nv84_temp_get;
299 else
300 engine->pm.temp_get = nv40_temp_get;
301 engine->pm.pwm_get = nv50_pm_pwm_get;
302 engine->pm.pwm_set = nv50_pm_pwm_set;
303 engine->vram.init = nv50_vram_init;
304 engine->vram.takedown = nv50_vram_fini;
305 engine->vram.get = nv50_vram_new;
306 engine->vram.put = nv50_vram_del;
307 engine->vram.flags_valid = nv50_vram_flags_valid;
308 break;
309 case 0xc0:
310 engine->instmem.init = nvc0_instmem_init;
311 engine->instmem.takedown = nvc0_instmem_takedown;
312 engine->instmem.suspend = nvc0_instmem_suspend;
313 engine->instmem.resume = nvc0_instmem_resume;
314 engine->instmem.get = nv50_instmem_get;
315 engine->instmem.put = nv50_instmem_put;
316 engine->instmem.map = nv50_instmem_map;
317 engine->instmem.unmap = nv50_instmem_unmap;
318 engine->instmem.flush = nv84_instmem_flush;
319 engine->mc.init = nv50_mc_init;
320 engine->mc.takedown = nv50_mc_takedown;
321 engine->timer.init = nv04_timer_init;
322 engine->timer.read = nv04_timer_read;
323 engine->timer.takedown = nv04_timer_takedown;
324 engine->fb.init = nvc0_fb_init;
325 engine->fb.takedown = nvc0_fb_takedown;
326 engine->display.early_init = nv50_display_early_init;
327 engine->display.late_takedown = nv50_display_late_takedown;
328 engine->display.create = nv50_display_create;
329 engine->display.destroy = nv50_display_destroy;
330 engine->display.init = nv50_display_init;
331 engine->display.fini = nv50_display_fini;
332 engine->gpio.init = nv50_gpio_init;
333 engine->gpio.fini = nv50_gpio_fini;
334 engine->gpio.drive = nv50_gpio_drive;
335 engine->gpio.sense = nv50_gpio_sense;
336 engine->gpio.irq_enable = nv50_gpio_irq_enable;
337 engine->vram.init = nvc0_vram_init;
338 engine->vram.takedown = nv50_vram_fini;
339 engine->vram.get = nvc0_vram_new;
340 engine->vram.put = nv50_vram_del;
341 engine->vram.flags_valid = nvc0_vram_flags_valid;
342 engine->pm.temp_get = nv84_temp_get;
343 engine->pm.clocks_get = nvc0_pm_clocks_get;
344 engine->pm.clocks_pre = nvc0_pm_clocks_pre;
345 engine->pm.clocks_set = nvc0_pm_clocks_set;
346 engine->pm.voltage_get = nouveau_voltage_gpio_get;
347 engine->pm.voltage_set = nouveau_voltage_gpio_set;
348 engine->pm.pwm_get = nv50_pm_pwm_get;
349 engine->pm.pwm_set = nv50_pm_pwm_set;
350 break;
351 case 0xd0:
352 engine->instmem.init = nvc0_instmem_init;
353 engine->instmem.takedown = nvc0_instmem_takedown;
354 engine->instmem.suspend = nvc0_instmem_suspend;
355 engine->instmem.resume = nvc0_instmem_resume;
356 engine->instmem.get = nv50_instmem_get;
357 engine->instmem.put = nv50_instmem_put;
358 engine->instmem.map = nv50_instmem_map;
359 engine->instmem.unmap = nv50_instmem_unmap;
360 engine->instmem.flush = nv84_instmem_flush;
361 engine->mc.init = nv50_mc_init;
362 engine->mc.takedown = nv50_mc_takedown;
363 engine->timer.init = nv04_timer_init;
364 engine->timer.read = nv04_timer_read;
365 engine->timer.takedown = nv04_timer_takedown;
366 engine->fb.init = nvc0_fb_init;
367 engine->fb.takedown = nvc0_fb_takedown;
368 engine->display.early_init = nouveau_stub_init;
369 engine->display.late_takedown = nouveau_stub_takedown;
370 engine->display.create = nvd0_display_create;
371 engine->display.destroy = nvd0_display_destroy;
372 engine->display.init = nvd0_display_init;
373 engine->display.fini = nvd0_display_fini;
374 engine->gpio.init = nv50_gpio_init;
375 engine->gpio.fini = nv50_gpio_fini;
376 engine->gpio.drive = nvd0_gpio_drive;
377 engine->gpio.sense = nvd0_gpio_sense;
378 engine->gpio.irq_enable = nv50_gpio_irq_enable;
379 engine->vram.init = nvc0_vram_init;
380 engine->vram.takedown = nv50_vram_fini;
381 engine->vram.get = nvc0_vram_new;
382 engine->vram.put = nv50_vram_del;
383 engine->vram.flags_valid = nvc0_vram_flags_valid;
384 engine->pm.temp_get = nv84_temp_get;
385 engine->pm.clocks_get = nvc0_pm_clocks_get;
386 engine->pm.clocks_pre = nvc0_pm_clocks_pre;
387 engine->pm.clocks_set = nvc0_pm_clocks_set;
388 engine->pm.voltage_get = nouveau_voltage_gpio_get;
389 engine->pm.voltage_set = nouveau_voltage_gpio_set;
390 break;
391 case 0xe0:
392 engine->instmem.init = nvc0_instmem_init;
393 engine->instmem.takedown = nvc0_instmem_takedown;
394 engine->instmem.suspend = nvc0_instmem_suspend;
395 engine->instmem.resume = nvc0_instmem_resume;
396 engine->instmem.get = nv50_instmem_get;
397 engine->instmem.put = nv50_instmem_put;
398 engine->instmem.map = nv50_instmem_map;
399 engine->instmem.unmap = nv50_instmem_unmap;
400 engine->instmem.flush = nv84_instmem_flush;
401 engine->mc.init = nv50_mc_init;
402 engine->mc.takedown = nv50_mc_takedown;
403 engine->timer.init = nv04_timer_init;
404 engine->timer.read = nv04_timer_read;
405 engine->timer.takedown = nv04_timer_takedown;
406 engine->fb.init = nvc0_fb_init;
407 engine->fb.takedown = nvc0_fb_takedown;
408 engine->display.early_init = nouveau_stub_init;
409 engine->display.late_takedown = nouveau_stub_takedown;
410 engine->display.create = nvd0_display_create;
411 engine->display.destroy = nvd0_display_destroy;
412 engine->display.init = nvd0_display_init;
413 engine->display.fini = nvd0_display_fini;
414 engine->gpio.init = nv50_gpio_init;
415 engine->gpio.fini = nv50_gpio_fini;
416 engine->gpio.drive = nvd0_gpio_drive;
417 engine->gpio.sense = nvd0_gpio_sense;
418 engine->gpio.irq_enable = nv50_gpio_irq_enable;
419 engine->vram.init = nvc0_vram_init;
420 engine->vram.takedown = nv50_vram_fini;
421 engine->vram.get = nvc0_vram_new;
422 engine->vram.put = nv50_vram_del;
423 engine->vram.flags_valid = nvc0_vram_flags_valid;
424 break;
425 default:
426 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
427 return 1;
428 }
429
430 /* headless mode */
431 if (nouveau_modeset == 2) {
432 engine->display.early_init = nouveau_stub_init;
433 engine->display.late_takedown = nouveau_stub_takedown;
434 engine->display.create = nouveau_stub_init;
435 engine->display.init = nouveau_stub_init;
436 engine->display.destroy = nouveau_stub_takedown;
437 }
438
439 return 0;
440}
441
442static unsigned int
443nouveau_vga_set_decode(void *priv, bool state)
444{
445 struct drm_device *dev = priv;
446 struct drm_nouveau_private *dev_priv = dev->dev_private;
447
448 if (dev_priv->chipset >= 0x40)
449 nv_wr32(dev, 0x88054, state);
450 else
451 nv_wr32(dev, 0x1854, state);
452
453 if (state)
454 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
455 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
456 else
457 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
458}
459
460static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
461 enum vga_switcheroo_state state)
462{
463 struct drm_device *dev = pci_get_drvdata(pdev);
464 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
465 if (state == VGA_SWITCHEROO_ON) {
466 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
467 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
468 nouveau_pci_resume(pdev);
469 drm_kms_helper_poll_enable(dev);
470 dev->switch_power_state = DRM_SWITCH_POWER_ON;
471 } else {
472 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
473 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
474 drm_kms_helper_poll_disable(dev);
475 nouveau_switcheroo_optimus_dsm();
476 nouveau_pci_suspend(pdev, pmm);
477 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
478 }
479}
480
481static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
482{
483 struct drm_device *dev = pci_get_drvdata(pdev);
484 nouveau_fbcon_output_poll_changed(dev);
485}
486
487static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
488{
489 struct drm_device *dev = pci_get_drvdata(pdev);
490 bool can_switch;
491
492 spin_lock(&dev->count_lock);
493 can_switch = (dev->open_count == 0);
494 spin_unlock(&dev->count_lock);
495 return can_switch;
496}
497
498static void
499nouveau_card_channel_fini(struct drm_device *dev)
500{
501 struct drm_nouveau_private *dev_priv = dev->dev_private;
502
503 if (dev_priv->channel)
504 nouveau_channel_put_unlocked(&dev_priv->channel);
505}
506
507static int
508nouveau_card_channel_init(struct drm_device *dev)
509{
510 struct drm_nouveau_private *dev_priv = dev->dev_private;
511 struct nouveau_channel *chan;
512 int ret;
513
514 ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
515 dev_priv->channel = chan;
516 if (ret)
517 return ret;
518 mutex_unlock(&dev_priv->channel->mutex);
519
520 nouveau_bo_move_init(chan);
521 return 0;
522}
523
524static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
525 .set_gpu_state = nouveau_switcheroo_set_state,
526 .reprobe = nouveau_switcheroo_reprobe,
527 .can_switch = nouveau_switcheroo_can_switch,
528};
529
530int
531nouveau_card_init(struct drm_device *dev)
532{
533 struct drm_nouveau_private *dev_priv = dev->dev_private;
534 struct nouveau_engine *engine;
535 int ret, e = 0;
536
537 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
538 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
539
540 /* Initialise internal driver API hooks */
541 ret = nouveau_init_engine_ptrs(dev);
542 if (ret)
543 goto out;
544 engine = &dev_priv->engine;
545 spin_lock_init(&dev_priv->channels.lock);
546 spin_lock_init(&dev_priv->tile.lock);
547 spin_lock_init(&dev_priv->context_switch_lock);
548 spin_lock_init(&dev_priv->vm_lock);
549
550 /* Make the CRTCs and I2C buses accessible */
551 ret = engine->display.early_init(dev);
552 if (ret)
553 goto out;
554
555 /* Parse BIOS tables / Run init tables if card not POSTed */
556 ret = nouveau_bios_init(dev);
557 if (ret)
558 goto out_display_early;
559
560 /* workaround an odd issue on nvc1 by disabling the device's
561 * nosnoop capability. hopefully won't cause issues until a
562 * better fix is found - assuming there is one...
563 */
564 if (dev_priv->chipset == 0xc1) {
565 nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
566 }
567
568 /* PMC */
569 ret = engine->mc.init(dev);
570 if (ret)
571 goto out_bios;
572
573 /* PTIMER */
574 ret = engine->timer.init(dev);
575 if (ret)
576 goto out_mc;
577
578 /* PFB */
579 ret = engine->fb.init(dev);
580 if (ret)
581 goto out_timer;
582
583 ret = engine->vram.init(dev);
584 if (ret)
585 goto out_fb;
586
587 /* PGPIO */
588 ret = nouveau_gpio_create(dev);
589 if (ret)
590 goto out_vram;
591
592 ret = nouveau_gpuobj_init(dev);
593 if (ret)
594 goto out_gpio;
595
596 ret = engine->instmem.init(dev);
597 if (ret)
598 goto out_gpuobj;
599
600 ret = nouveau_mem_vram_init(dev);
601 if (ret)
602 goto out_instmem;
603
604 ret = nouveau_mem_gart_init(dev);
605 if (ret)
606 goto out_ttmvram;
607
608 if (!dev_priv->noaccel) {
609 switch (dev_priv->card_type) {
610 case NV_04:
611 nv04_fifo_create(dev);
612 break;
613 case NV_10:
614 case NV_20:
615 case NV_30:
616 if (dev_priv->chipset < 0x17)
617 nv10_fifo_create(dev);
618 else
619 nv17_fifo_create(dev);
620 break;
621 case NV_40:
622 nv40_fifo_create(dev);
623 break;
624 case NV_50:
625 if (dev_priv->chipset == 0x50)
626 nv50_fifo_create(dev);
627 else
628 nv84_fifo_create(dev);
629 break;
630 case NV_C0:
631 case NV_D0:
632 nvc0_fifo_create(dev);
633 break;
634 case NV_E0:
635 nve0_fifo_create(dev);
636 break;
637 default:
638 break;
639 }
640
641 switch (dev_priv->card_type) {
642 case NV_04:
643 nv04_fence_create(dev);
644 break;
645 case NV_10:
646 case NV_20:
647 case NV_30:
648 case NV_40:
649 case NV_50:
650 if (dev_priv->chipset < 0x84)
651 nv10_fence_create(dev);
652 else
653 nv84_fence_create(dev);
654 break;
655 case NV_C0:
656 case NV_D0:
657 case NV_E0:
658 nvc0_fence_create(dev);
659 break;
660 default:
661 break;
662 }
663
664 switch (dev_priv->card_type) {
665 case NV_04:
666 case NV_10:
667 case NV_20:
668 case NV_30:
669 case NV_40:
670 nv04_software_create(dev);
671 break;
672 case NV_50:
673 nv50_software_create(dev);
674 break;
675 case NV_C0:
676 case NV_D0:
677 case NV_E0:
678 nvc0_software_create(dev);
679 break;
680 default:
681 break;
682 }
683
684 switch (dev_priv->card_type) {
685 case NV_04:
686 nv04_graph_create(dev);
687 break;
688 case NV_10:
689 nv10_graph_create(dev);
690 break;
691 case NV_20:
692 case NV_30:
693 nv20_graph_create(dev);
694 break;
695 case NV_40:
696 nv40_graph_create(dev);
697 break;
698 case NV_50:
699 nv50_graph_create(dev);
700 break;
701 case NV_C0:
702 case NV_D0:
703 nvc0_graph_create(dev);
704 break;
705 case NV_E0:
706 nve0_graph_create(dev);
707 break;
708 default:
709 break;
710 }
711
712 switch (dev_priv->chipset) {
713 case 0x84:
714 case 0x86:
715 case 0x92:
716 case 0x94:
717 case 0x96:
718 case 0xa0:
719 nv84_crypt_create(dev);
720 break;
721 case 0x98:
722 case 0xaa:
723 case 0xac:
724 nv98_crypt_create(dev);
725 break;
726 }
727
728 switch (dev_priv->card_type) {
729 case NV_50:
730 switch (dev_priv->chipset) {
731 case 0xa3:
732 case 0xa5:
733 case 0xa8:
734 nva3_copy_create(dev);
735 break;
736 }
737 break;
738 case NV_C0:
739 if (!(nv_rd32(dev, 0x022500) & 0x00000200))
740 nvc0_copy_create(dev, 1);
741 case NV_D0:
742 if (!(nv_rd32(dev, 0x022500) & 0x00000100))
743 nvc0_copy_create(dev, 0);
744 break;
745 default:
746 break;
747 }
748
749 if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
750 nv84_bsp_create(dev);
751 nv84_vp_create(dev);
752 nv98_ppp_create(dev);
753 } else
754 if (dev_priv->chipset >= 0x84) {
755 nv50_mpeg_create(dev);
756 nv84_bsp_create(dev);
757 nv84_vp_create(dev);
758 } else
759 if (dev_priv->chipset >= 0x50) {
760 nv50_mpeg_create(dev);
761 } else
762 if (dev_priv->card_type == NV_40 ||
763 dev_priv->chipset == 0x31 ||
764 dev_priv->chipset == 0x34 ||
765 dev_priv->chipset == 0x36) {
766 nv31_mpeg_create(dev);
767 }
768
769 for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
770 if (dev_priv->eng[e]) {
771 ret = dev_priv->eng[e]->init(dev, e);
772 if (ret)
773 goto out_engine;
774 }
775 }
776 }
777
778 ret = nouveau_irq_init(dev);
779 if (ret)
780 goto out_engine;
781
782 ret = nouveau_display_create(dev);
783 if (ret)
784 goto out_irq;
785
786 nouveau_backlight_init(dev);
787 nouveau_pm_init(dev);
788
789 if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
790 ret = nouveau_card_channel_init(dev);
791 if (ret)
792 goto out_pm;
793 }
794
795 if (dev->mode_config.num_crtc) {
796 ret = nouveau_display_init(dev);
797 if (ret)
798 goto out_chan;
799
800 nouveau_fbcon_init(dev);
801 }
802
803 return 0;
804
805out_chan:
806 nouveau_card_channel_fini(dev);
807out_pm:
808 nouveau_pm_fini(dev);
809 nouveau_backlight_exit(dev);
810 nouveau_display_destroy(dev);
811out_irq:
812 nouveau_irq_fini(dev);
813out_engine:
814 if (!dev_priv->noaccel) {
815 for (e = e - 1; e >= 0; e--) {
816 if (!dev_priv->eng[e])
817 continue;
818 dev_priv->eng[e]->fini(dev, e, false);
819 dev_priv->eng[e]->destroy(dev,e );
820 }
821 }
822 nouveau_mem_gart_fini(dev);
823out_ttmvram:
824 nouveau_mem_vram_fini(dev);
825out_instmem:
826 engine->instmem.takedown(dev);
827out_gpuobj:
828 nouveau_gpuobj_takedown(dev);
829out_gpio:
830 nouveau_gpio_destroy(dev);
831out_vram:
832 engine->vram.takedown(dev);
833out_fb:
834 engine->fb.takedown(dev);
835out_timer:
836 engine->timer.takedown(dev);
837out_mc:
838 engine->mc.takedown(dev);
839out_bios:
840 nouveau_bios_takedown(dev);
841out_display_early:
842 engine->display.late_takedown(dev);
843out:
844 vga_switcheroo_unregister_client(dev->pdev);
845 vga_client_register(dev->pdev, NULL, NULL, NULL);
846 return ret;
847}
848
849static void nouveau_card_takedown(struct drm_device *dev)
850{
851 struct drm_nouveau_private *dev_priv = dev->dev_private;
852 struct nouveau_engine *engine = &dev_priv->engine;
853 int e;
854
855 if (dev->mode_config.num_crtc) {
856 nouveau_fbcon_fini(dev);
857 nouveau_display_fini(dev);
858 }
859
860 nouveau_card_channel_fini(dev);
861 nouveau_pm_fini(dev);
862 nouveau_backlight_exit(dev);
863 nouveau_display_destroy(dev);
864
865 if (!dev_priv->noaccel) {
866 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
867 if (dev_priv->eng[e]) {
868 dev_priv->eng[e]->fini(dev, e, false);
869 dev_priv->eng[e]->destroy(dev,e );
870 }
871 }
872 }
873
874 if (dev_priv->vga_ram) {
875 nouveau_bo_unpin(dev_priv->vga_ram);
876 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
877 }
878
879 mutex_lock(&dev->struct_mutex);
880 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
881 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
882 mutex_unlock(&dev->struct_mutex);
883 nouveau_mem_gart_fini(dev);
884 nouveau_mem_vram_fini(dev);
885
886 engine->instmem.takedown(dev);
887 nouveau_gpuobj_takedown(dev);
888
889 nouveau_gpio_destroy(dev);
890 engine->vram.takedown(dev);
891 engine->fb.takedown(dev);
892 engine->timer.takedown(dev);
893 engine->mc.takedown(dev);
894
895 nouveau_bios_takedown(dev);
896 engine->display.late_takedown(dev);
897
898 nouveau_irq_fini(dev);
899
900 vga_switcheroo_unregister_client(dev->pdev);
901 vga_client_register(dev->pdev, NULL, NULL, NULL);
902}
903
904int
905nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
906{
907 struct drm_nouveau_private *dev_priv = dev->dev_private;
908 struct nouveau_fpriv *fpriv;
909 int ret;
910
911 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
912 if (unlikely(!fpriv))
913 return -ENOMEM;
914
915 spin_lock_init(&fpriv->lock);
916 INIT_LIST_HEAD(&fpriv->channels);
917
918 if (dev_priv->card_type == NV_50) {
919 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
920 &fpriv->vm);
921 if (ret) {
922 kfree(fpriv);
923 return ret;
924 }
925 } else
926 if (dev_priv->card_type >= NV_C0) {
927 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
928 &fpriv->vm);
929 if (ret) {
930 kfree(fpriv);
931 return ret;
932 }
933 }
934
935 file_priv->driver_priv = fpriv;
936 return 0;
937}
938
939/* here a client dies, release the stuff that was allocated for its
940 * file_priv */
941void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
942{
943 nouveau_channel_cleanup(dev, file_priv);
944}
945
946void
947nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
948{
949 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
950 nouveau_vm_ref(NULL, &fpriv->vm, NULL);
951 kfree(fpriv);
952}
953
954/* first module load, setup the mmio/fb mapping */
955/* KMS: we need mmio at load time, not when the first drm client opens. */
956int nouveau_firstopen(struct drm_device *dev)
957{
958 return 0;
959}
960
961/* if we have an OF card, copy vbios to RAMIN */
962static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
963{
964#if defined(__powerpc__)
965 int size, i;
966 const uint32_t *bios;
967 struct device_node *dn = pci_device_to_OF_node(dev->pdev);
968 if (!dn) {
969 NV_INFO(dev, "Unable to get the OF node\n");
970 return;
971 }
972
973 bios = of_get_property(dn, "NVDA,BMP", &size);
974 if (bios) {
975 for (i = 0; i < size; i += 4)
976 nv_wi32(dev, i, bios[i/4]);
977 NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
978 } else {
979 NV_INFO(dev, "Unable to get the OF bios\n");
980 }
981#endif
982}
983
984static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
985{
986 struct pci_dev *pdev = dev->pdev;
987 struct apertures_struct *aper = alloc_apertures(3);
988 if (!aper)
989 return NULL;
990
991 aper->ranges[0].base = pci_resource_start(pdev, 1);
992 aper->ranges[0].size = pci_resource_len(pdev, 1);
993 aper->count = 1;
994
995 if (pci_resource_len(pdev, 2)) {
996 aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
997 aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
998 aper->count++;
999 }
1000
1001 if (pci_resource_len(pdev, 3)) {
1002 aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
1003 aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
1004 aper->count++;
1005 }
1006
1007 return aper;
1008}
1009
1010static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
1011{
1012 struct drm_nouveau_private *dev_priv = dev->dev_private;
1013 bool primary = false;
1014 dev_priv->apertures = nouveau_get_apertures(dev);
1015 if (!dev_priv->apertures)
1016 return -ENOMEM;
1017
1018#ifdef CONFIG_X86
1019 primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
1020#endif
1021
1022 remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
1023 return 0;
1024}
1025
1026int nouveau_load(struct drm_device *dev, unsigned long flags)
1027{
1028 struct drm_nouveau_private *dev_priv;
1029 unsigned long long offset, length;
1030 uint32_t reg0 = ~0, strap;
1031 int ret;
1032
1033 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1034 if (!dev_priv) {
1035 ret = -ENOMEM;
1036 goto err_out;
1037 }
1038 dev->dev_private = dev_priv;
1039 dev_priv->dev = dev;
1040
1041 pci_set_master(dev->pdev);
1042
1043 dev_priv->flags = flags & NOUVEAU_FLAGS;
1044
1045 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
1046 dev->pci_vendor, dev->pci_device, dev->pdev->class);
1047
1048 /* first up, map the start of mmio and determine the chipset */
1049 dev_priv->mmio = ioremap(pci_resource_start(dev->pdev, 0), PAGE_SIZE);
1050 if (dev_priv->mmio) {
1051#ifdef __BIG_ENDIAN
1052 /* put the card into big-endian mode if it's not */
1053 if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
1054 nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
1055 DRM_MEMORYBARRIER();
1056#endif
1057
1058 /* determine chipset and derive architecture from it */
1059 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
1060 if ((reg0 & 0x0f000000) > 0) {
1061 dev_priv->chipset = (reg0 & 0xff00000) >> 20;
1062 switch (dev_priv->chipset & 0xf0) {
1063 case 0x10:
1064 case 0x20:
1065 case 0x30:
1066 dev_priv->card_type = dev_priv->chipset & 0xf0;
1067 break;
1068 case 0x40:
1069 case 0x60:
1070 dev_priv->card_type = NV_40;
1071 break;
1072 case 0x50:
1073 case 0x80:
1074 case 0x90:
1075 case 0xa0:
1076 dev_priv->card_type = NV_50;
1077 break;
1078 case 0xc0:
1079 dev_priv->card_type = NV_C0;
1080 break;
1081 case 0xd0:
1082 dev_priv->card_type = NV_D0;
1083 break;
1084 case 0xe0:
1085 dev_priv->card_type = NV_E0;
1086 break;
1087 default:
1088 break;
1089 }
1090 } else
1091 if ((reg0 & 0xff00fff0) == 0x20004000) {
1092 if (reg0 & 0x00f00000)
1093 dev_priv->chipset = 0x05;
1094 else
1095 dev_priv->chipset = 0x04;
1096 dev_priv->card_type = NV_04;
1097 }
1098
1099 iounmap(dev_priv->mmio);
1100 }
1101
1102 if (!dev_priv->card_type) {
1103 NV_ERROR(dev, "unsupported chipset 0x%08x\n", reg0);
1104 ret = -EINVAL;
1105 goto err_priv;
1106 }
1107
1108 NV_INFO(dev, "Detected an NV%02x generation card (0x%08x)\n",
1109 dev_priv->card_type, reg0);
1110
1111 /* map the mmio regs, limiting the amount to preserve vmap space */
1112 offset = pci_resource_start(dev->pdev, 0);
1113 length = pci_resource_len(dev->pdev, 0);
1114 if (dev_priv->card_type < NV_E0)
1115 length = min(length, (unsigned long long)0x00800000);
1116
1117 dev_priv->mmio = ioremap(offset, length);
1118 if (!dev_priv->mmio) {
1119 NV_ERROR(dev, "Unable to initialize the mmio mapping. "
1120 "Please report your setup to " DRIVER_EMAIL "\n");
1121 ret = -EINVAL;
1122 goto err_priv;
1123 }
1124 NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", offset);
1125
1126 /* determine frequency of timing crystal */
1127 strap = nv_rd32(dev, 0x101000);
1128 if ( dev_priv->chipset < 0x17 ||
1129 (dev_priv->chipset >= 0x20 && dev_priv->chipset <= 0x25))
1130 strap &= 0x00000040;
1131 else
1132 strap &= 0x00400040;
1133
1134 switch (strap) {
1135 case 0x00000000: dev_priv->crystal = 13500; break;
1136 case 0x00000040: dev_priv->crystal = 14318; break;
1137 case 0x00400000: dev_priv->crystal = 27000; break;
1138 case 0x00400040: dev_priv->crystal = 25000; break;
1139 }
1140
1141 NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal);
1142
1143 /* Determine whether we'll attempt acceleration or not, some
1144 * cards are disabled by default here due to them being known
1145 * non-functional, or never been tested due to lack of hw.
1146 */
1147 dev_priv->noaccel = !!nouveau_noaccel;
1148 if (nouveau_noaccel == -1) {
1149 switch (dev_priv->chipset) {
1150 case 0xd9: /* known broken */
1151 case 0xe4: /* needs binary driver firmware */
1152 case 0xe7: /* needs binary driver firmware */
1153 NV_INFO(dev, "acceleration disabled by default, pass "
1154 "noaccel=0 to force enable\n");
1155 dev_priv->noaccel = true;
1156 break;
1157 default:
1158 dev_priv->noaccel = false;
1159 break;
1160 }
1161 }
1162
1163 ret = nouveau_remove_conflicting_drivers(dev);
1164 if (ret)
1165 goto err_mmio;
1166
1167 /* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
1168 if (dev_priv->card_type >= NV_40) {
1169 int ramin_bar = 2;
1170 if (pci_resource_len(dev->pdev, ramin_bar) == 0)
1171 ramin_bar = 3;
1172
1173 dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
1174 dev_priv->ramin =
1175 ioremap(pci_resource_start(dev->pdev, ramin_bar),
1176 dev_priv->ramin_size);
1177 if (!dev_priv->ramin) {
1178 NV_ERROR(dev, "Failed to map PRAMIN BAR\n");
1179 ret = -ENOMEM;
1180 goto err_mmio;
1181 }
1182 } else {
1183 dev_priv->ramin_size = 1 * 1024 * 1024;
1184 dev_priv->ramin = ioremap(offset + NV_RAMIN,
1185 dev_priv->ramin_size);
1186 if (!dev_priv->ramin) {
1187 NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
1188 ret = -ENOMEM;
1189 goto err_mmio;
1190 }
1191 }
1192
1193 nouveau_OF_copy_vbios_to_ramin(dev);
1194
1195 /* Special flags */
1196 if (dev->pci_device == 0x01a0)
1197 dev_priv->flags |= NV_NFORCE;
1198 else if (dev->pci_device == 0x01f0)
1199 dev_priv->flags |= NV_NFORCE2;
1200
1201 /* For kernel modesetting, init card now and bring up fbcon */
1202 ret = nouveau_card_init(dev);
1203 if (ret)
1204 goto err_ramin;
1205
1206 return 0;
1207
1208err_ramin:
1209 iounmap(dev_priv->ramin);
1210err_mmio:
1211 iounmap(dev_priv->mmio);
1212err_priv:
1213 kfree(dev_priv);
1214 dev->dev_private = NULL;
1215err_out:
1216 return ret;
1217}
1218
1219void nouveau_lastclose(struct drm_device *dev)
1220{
1221 vga_switcheroo_process_delayed_switch();
1222}
1223
1224int nouveau_unload(struct drm_device *dev)
1225{
1226 struct drm_nouveau_private *dev_priv = dev->dev_private;
1227
1228 nouveau_card_takedown(dev);
1229
1230 iounmap(dev_priv->mmio);
1231 iounmap(dev_priv->ramin);
1232
1233 kfree(dev_priv);
1234 dev->dev_private = NULL;
1235 return 0;
1236}
1237
1238/* Wait until (value(reg) & mask) == val, up until timeout has hit */
1239bool
1240nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
1241 uint32_t reg, uint32_t mask, uint32_t val)
1242{
1243 struct drm_nouveau_private *dev_priv = dev->dev_private;
1244 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
1245 uint64_t start = ptimer->read(dev);
1246
1247 do {
1248 if ((nv_rd32(dev, reg) & mask) == val)
1249 return true;
1250 } while (ptimer->read(dev) - start < timeout);
1251
1252 return false;
1253}
1254
1255/* Wait until (value(reg) & mask) != val, up until timeout has hit */
1256bool
1257nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
1258 uint32_t reg, uint32_t mask, uint32_t val)
1259{
1260 struct drm_nouveau_private *dev_priv = dev->dev_private;
1261 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
1262 uint64_t start = ptimer->read(dev);
1263
1264 do {
1265 if ((nv_rd32(dev, reg) & mask) != val)
1266 return true;
1267 } while (ptimer->read(dev) - start < timeout);
1268
1269 return false;
1270}
1271
1272/* Wait until cond(data) == true, up until timeout has hit */
1273bool
1274nouveau_wait_cb(struct drm_device *dev, u64 timeout,
1275 bool (*cond)(void *), void *data)
1276{
1277 struct drm_nouveau_private *dev_priv = dev->dev_private;
1278 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
1279 u64 start = ptimer->read(dev);
1280
1281 do {
1282 if (cond(data) == true)
1283 return true;
1284 } while (ptimer->read(dev) - start < timeout);
1285
1286 return false;
1287}
1288
1289/* Waits for PGRAPH to go completely idle */
1290bool nouveau_wait_for_idle(struct drm_device *dev)
1291{
1292 struct drm_nouveau_private *dev_priv = dev->dev_private;
1293 uint32_t mask = ~0;
1294
1295 if (dev_priv->card_type == NV_40)
1296 mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
1297
1298 if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
1299 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
1300 nv_rd32(dev, NV04_PGRAPH_STATUS));
1301 return false;
1302 }
1303
1304 return true;
1305}
1306
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
deleted file mode 100644
index 0f5a30160556..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ /dev/null
@@ -1,331 +0,0 @@
1/*
2 * Copyright 2010 PathScale inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <linux/module.h>
26
27#include "drmP.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_pm.h"
31
32static void
33nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
34{
35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
37 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
38 struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
39 int i, headerlen, recordlen, entries;
40
41 if (!temp) {
42 NV_DEBUG(dev, "temperature table pointer invalid\n");
43 return;
44 }
45
46 /* Set the default sensor's contants */
47 sensor->offset_constant = 0;
48 sensor->offset_mult = 0;
49 sensor->offset_div = 1;
50 sensor->slope_mult = 1;
51 sensor->slope_div = 1;
52
53 /* Set the default temperature thresholds */
54 temps->critical = 110;
55 temps->down_clock = 100;
56 temps->fan_boost = 90;
57
58 /* Set the default range for the pwm fan */
59 pm->fan.min_duty = 30;
60 pm->fan.max_duty = 100;
61
62 /* Set the known default values to setup the temperature sensor */
63 if (dev_priv->card_type >= NV_40) {
64 switch (dev_priv->chipset) {
65 case 0x43:
66 sensor->offset_mult = 32060;
67 sensor->offset_div = 1000;
68 sensor->slope_mult = 792;
69 sensor->slope_div = 1000;
70 break;
71
72 case 0x44:
73 case 0x47:
74 case 0x4a:
75 sensor->offset_mult = 27839;
76 sensor->offset_div = 1000;
77 sensor->slope_mult = 780;
78 sensor->slope_div = 1000;
79 break;
80
81 case 0x46:
82 sensor->offset_mult = -24775;
83 sensor->offset_div = 100;
84 sensor->slope_mult = 467;
85 sensor->slope_div = 10000;
86 break;
87
88 case 0x49:
89 sensor->offset_mult = -25051;
90 sensor->offset_div = 100;
91 sensor->slope_mult = 458;
92 sensor->slope_div = 10000;
93 break;
94
95 case 0x4b:
96 sensor->offset_mult = -24088;
97 sensor->offset_div = 100;
98 sensor->slope_mult = 442;
99 sensor->slope_div = 10000;
100 break;
101
102 case 0x50:
103 sensor->offset_mult = -22749;
104 sensor->offset_div = 100;
105 sensor->slope_mult = 431;
106 sensor->slope_div = 10000;
107 break;
108
109 case 0x67:
110 sensor->offset_mult = -26149;
111 sensor->offset_div = 100;
112 sensor->slope_mult = 484;
113 sensor->slope_div = 10000;
114 break;
115 }
116 }
117
118 headerlen = temp[1];
119 recordlen = temp[2];
120 entries = temp[3];
121 temp = temp + headerlen;
122
123 /* Read the entries from the table */
124 for (i = 0; i < entries; i++) {
125 s16 value = ROM16(temp[1]);
126
127 switch (temp[0]) {
128 case 0x01:
129 if ((value & 0x8f) == 0)
130 sensor->offset_constant = (value >> 9) & 0x7f;
131 break;
132
133 case 0x04:
134 if ((value & 0xf00f) == 0xa000) /* core */
135 temps->critical = (value&0x0ff0) >> 4;
136 break;
137
138 case 0x07:
139 if ((value & 0xf00f) == 0xa000) /* core */
140 temps->down_clock = (value&0x0ff0) >> 4;
141 break;
142
143 case 0x08:
144 if ((value & 0xf00f) == 0xa000) /* core */
145 temps->fan_boost = (value&0x0ff0) >> 4;
146 break;
147
148 case 0x10:
149 sensor->offset_mult = value;
150 break;
151
152 case 0x11:
153 sensor->offset_div = value;
154 break;
155
156 case 0x12:
157 sensor->slope_mult = value;
158 break;
159
160 case 0x13:
161 sensor->slope_div = value;
162 break;
163 case 0x22:
164 pm->fan.min_duty = value & 0xff;
165 pm->fan.max_duty = (value & 0xff00) >> 8;
166 break;
167 case 0x26:
168 pm->fan.pwm_freq = value;
169 break;
170 }
171 temp += recordlen;
172 }
173
174 nouveau_temp_safety_checks(dev);
175
176 /* check the fan min/max settings */
177 if (pm->fan.min_duty < 10)
178 pm->fan.min_duty = 10;
179 if (pm->fan.max_duty > 100)
180 pm->fan.max_duty = 100;
181 if (pm->fan.max_duty < pm->fan.min_duty)
182 pm->fan.max_duty = pm->fan.min_duty;
183}
184
185static int
186nv40_sensor_setup(struct drm_device *dev)
187{
188 struct drm_nouveau_private *dev_priv = dev->dev_private;
189 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
190 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
191 s32 offset = sensor->offset_mult / sensor->offset_div;
192 s32 sensor_calibration;
193
194 /* set up the sensors */
195 sensor_calibration = 120 - offset - sensor->offset_constant;
196 sensor_calibration = sensor_calibration * sensor->slope_div /
197 sensor->slope_mult;
198
199 if (dev_priv->chipset >= 0x46)
200 sensor_calibration |= 0x80000000;
201 else
202 sensor_calibration |= 0x10000000;
203
204 nv_wr32(dev, 0x0015b0, sensor_calibration);
205
206 /* Wait for the sensor to update */
207 msleep(5);
208
209 /* read */
210 return nv_rd32(dev, 0x0015b4) & 0x1fff;
211}
212
213int
214nv40_temp_get(struct drm_device *dev)
215{
216 struct drm_nouveau_private *dev_priv = dev->dev_private;
217 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
218 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
219 int offset = sensor->offset_mult / sensor->offset_div;
220 int core_temp;
221
222 if (dev_priv->card_type >= NV_50) {
223 core_temp = nv_rd32(dev, 0x20008);
224 } else {
225 core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
226 /* Setup the sensor if the temperature is 0 */
227 if (core_temp == 0)
228 core_temp = nv40_sensor_setup(dev);
229 }
230
231 core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
232 core_temp = core_temp + offset + sensor->offset_constant;
233
234 return core_temp;
235}
236
237int
238nv84_temp_get(struct drm_device *dev)
239{
240 return nv_rd32(dev, 0x20400);
241}
242
243void
244nouveau_temp_safety_checks(struct drm_device *dev)
245{
246 struct drm_nouveau_private *dev_priv = dev->dev_private;
247 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
248 struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
249
250 if (temps->critical > 120)
251 temps->critical = 120;
252 else if (temps->critical < 80)
253 temps->critical = 80;
254
255 if (temps->down_clock > 110)
256 temps->down_clock = 110;
257 else if (temps->down_clock < 60)
258 temps->down_clock = 60;
259
260 if (temps->fan_boost > 100)
261 temps->fan_boost = 100;
262 else if (temps->fan_boost < 40)
263 temps->fan_boost = 40;
264}
265
266static bool
267probe_monitoring_device(struct nouveau_i2c_chan *i2c,
268 struct i2c_board_info *info)
269{
270 struct i2c_client *client;
271
272 request_module("%s%s", I2C_MODULE_PREFIX, info->type);
273
274 client = i2c_new_device(&i2c->adapter, info);
275 if (!client)
276 return false;
277
278 if (!client->driver || client->driver->detect(client, info)) {
279 i2c_unregister_device(client);
280 return false;
281 }
282
283 return true;
284}
285
286static void
287nouveau_temp_probe_i2c(struct drm_device *dev)
288{
289 struct i2c_board_info info[] = {
290 { I2C_BOARD_INFO("w83l785ts", 0x2d) },
291 { I2C_BOARD_INFO("w83781d", 0x2d) },
292 { I2C_BOARD_INFO("adt7473", 0x2e) },
293 { I2C_BOARD_INFO("f75375", 0x2e) },
294 { I2C_BOARD_INFO("lm99", 0x4c) },
295 { }
296 };
297
298 nouveau_i2c_identify(dev, "monitoring device", info,
299 probe_monitoring_device, NV_I2C_DEFAULT(0));
300}
301
302void
303nouveau_temp_init(struct drm_device *dev)
304{
305 struct drm_nouveau_private *dev_priv = dev->dev_private;
306 struct nvbios *bios = &dev_priv->vbios;
307 struct bit_entry P;
308 u8 *temp = NULL;
309
310 if (bios->type == NVBIOS_BIT) {
311 if (bit_table(dev, 'P', &P))
312 return;
313
314 if (P.version == 1)
315 temp = ROMPTR(dev, P.data[12]);
316 else if (P.version == 2)
317 temp = ROMPTR(dev, P.data[16]);
318 else
319 NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
320
321 nouveau_temp_vbios_parse(dev, temp);
322 }
323
324 nouveau_temp_probe_i2c(dev);
325}
326
327void
328nouveau_temp_fini(struct drm_device *dev)
329{
330
331}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index bd35f930568c..9be9cb58e19b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,21 +24,253 @@
24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */ 25 */
26 26
27#include "drmP.h" 27#include <subdev/fb.h>
28#include <subdev/vm.h>
29#include <subdev/instmem.h>
28 30
29#include "nouveau_drv.h" 31#include "nouveau_drm.h"
32#include "nouveau_ttm.h"
33#include "nouveau_gem.h"
34
35static int
36nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
37{
38 /* nothing to do */
39 return 0;
40}
41
42static int
43nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
44{
45 /* nothing to do */
46 return 0;
47}
48
49static inline void
50nouveau_mem_node_cleanup(struct nouveau_mem *node)
51{
52 if (node->vma[0].node) {
53 nouveau_vm_unmap(&node->vma[0]);
54 nouveau_vm_put(&node->vma[0]);
55 }
56
57 if (node->vma[1].node) {
58 nouveau_vm_unmap(&node->vma[1]);
59 nouveau_vm_put(&node->vma[1]);
60 }
61}
62
63static void
64nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
65 struct ttm_mem_reg *mem)
66{
67 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
68 struct nouveau_fb *pfb = nouveau_fb(drm->device);
69 nouveau_mem_node_cleanup(mem->mm_node);
70 pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
71}
72
73static int
74nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
75 struct ttm_buffer_object *bo,
76 struct ttm_placement *placement,
77 struct ttm_mem_reg *mem)
78{
79 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
80 struct nouveau_fb *pfb = nouveau_fb(drm->device);
81 struct nouveau_bo *nvbo = nouveau_bo(bo);
82 struct nouveau_mem *node;
83 u32 size_nc = 0;
84 int ret;
85
86 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
87 size_nc = 1 << nvbo->page_shift;
88
89 ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
90 mem->page_alignment << PAGE_SHIFT, size_nc,
91 (nvbo->tile_flags >> 8) & 0x3ff, &node);
92 if (ret) {
93 mem->mm_node = NULL;
94 return (ret == -ENOSPC) ? 0 : ret;
95 }
96
97 node->page_shift = nvbo->page_shift;
98
99 mem->mm_node = node;
100 mem->start = node->offset >> PAGE_SHIFT;
101 return 0;
102}
103
104static void
105nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
106{
107 struct nouveau_mm *mm = man->priv;
108 struct nouveau_mm_node *r;
109 u32 total = 0, free = 0;
110
111 mutex_lock(&mm->mutex);
112 list_for_each_entry(r, &mm->nodes, nl_entry) {
113 printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
114 prefix, r->type, ((u64)r->offset << 12),
115 (((u64)r->offset + r->length) << 12));
116
117 total += r->length;
118 if (!r->type)
119 free += r->length;
120 }
121 mutex_unlock(&mm->mutex);
122
123 printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
124 prefix, (u64)total << 12, (u64)free << 12);
125 printk(KERN_DEBUG "%s block: 0x%08x\n",
126 prefix, mm->block_size << 12);
127}
128
129const struct ttm_mem_type_manager_func nouveau_vram_manager = {
130 nouveau_vram_manager_init,
131 nouveau_vram_manager_fini,
132 nouveau_vram_manager_new,
133 nouveau_vram_manager_del,
134 nouveau_vram_manager_debug
135};
136
137static int
138nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
139{
140 return 0;
141}
142
143static int
144nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
145{
146 return 0;
147}
148
149static void
150nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
151 struct ttm_mem_reg *mem)
152{
153 nouveau_mem_node_cleanup(mem->mm_node);
154 kfree(mem->mm_node);
155 mem->mm_node = NULL;
156}
157
158static int
159nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
160 struct ttm_buffer_object *bo,
161 struct ttm_placement *placement,
162 struct ttm_mem_reg *mem)
163{
164 struct nouveau_mem *node;
165
166 if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
167 return -ENOMEM;
168
169 node = kzalloc(sizeof(*node), GFP_KERNEL);
170 if (!node)
171 return -ENOMEM;
172 node->page_shift = 12;
173
174 mem->mm_node = node;
175 mem->start = 0;
176 return 0;
177}
178
179static void
180nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
181{
182}
183
184const struct ttm_mem_type_manager_func nouveau_gart_manager = {
185 nouveau_gart_manager_init,
186 nouveau_gart_manager_fini,
187 nouveau_gart_manager_new,
188 nouveau_gart_manager_del,
189 nouveau_gart_manager_debug
190};
191
192#include <core/subdev/vm/nv04.h>
193static int
194nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
195{
196 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
197 struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
198 struct nv04_vmmgr_priv *priv = (void *)vmm;
199 struct nouveau_vm *vm = NULL;
200 nouveau_vm_ref(priv->vm, &vm, NULL);
201 man->priv = vm;
202 return 0;
203}
204
205static int
206nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
207{
208 struct nouveau_vm *vm = man->priv;
209 nouveau_vm_ref(NULL, &vm, NULL);
210 man->priv = NULL;
211 return 0;
212}
213
214static void
215nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
216{
217 struct nouveau_mem *node = mem->mm_node;
218 if (node->vma[0].node)
219 nouveau_vm_put(&node->vma[0]);
220 kfree(mem->mm_node);
221 mem->mm_node = NULL;
222}
223
224static int
225nv04_gart_manager_new(struct ttm_mem_type_manager *man,
226 struct ttm_buffer_object *bo,
227 struct ttm_placement *placement,
228 struct ttm_mem_reg *mem)
229{
230 struct nouveau_mem *node;
231 int ret;
232
233 node = kzalloc(sizeof(*node), GFP_KERNEL);
234 if (!node)
235 return -ENOMEM;
236
237 node->page_shift = 12;
238
239 ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
240 NV_MEM_ACCESS_RW, &node->vma[0]);
241 if (ret) {
242 kfree(node);
243 return ret;
244 }
245
246 mem->mm_node = node;
247 mem->start = node->vma[0].offset >> PAGE_SHIFT;
248 return 0;
249}
250
251static void
252nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
253{
254}
255
256const struct ttm_mem_type_manager_func nv04_gart_manager = {
257 nv04_gart_manager_init,
258 nv04_gart_manager_fini,
259 nv04_gart_manager_new,
260 nv04_gart_manager_del,
261 nv04_gart_manager_debug
262};
30 263
31int 264int
32nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) 265nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
33{ 266{
34 struct drm_file *file_priv = filp->private_data; 267 struct drm_file *file_priv = filp->private_data;
35 struct drm_nouveau_private *dev_priv = 268 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
36 file_priv->minor->dev->dev_private;
37 269
38 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 270 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
39 return drm_mmap(filp, vma); 271 return drm_mmap(filp, vma);
40 272
41 return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); 273 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
42} 274}
43 275
44static int 276static int
@@ -54,12 +286,12 @@ nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
54} 286}
55 287
56int 288int
57nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv) 289nouveau_ttm_global_init(struct nouveau_drm *drm)
58{ 290{
59 struct drm_global_reference *global_ref; 291 struct drm_global_reference *global_ref;
60 int ret; 292 int ret;
61 293
62 global_ref = &dev_priv->ttm.mem_global_ref; 294 global_ref = &drm->ttm.mem_global_ref;
63 global_ref->global_type = DRM_GLOBAL_TTM_MEM; 295 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
64 global_ref->size = sizeof(struct ttm_mem_global); 296 global_ref->size = sizeof(struct ttm_mem_global);
65 global_ref->init = &nouveau_ttm_mem_global_init; 297 global_ref->init = &nouveau_ttm_mem_global_init;
@@ -68,12 +300,12 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
68 ret = drm_global_item_ref(global_ref); 300 ret = drm_global_item_ref(global_ref);
69 if (unlikely(ret != 0)) { 301 if (unlikely(ret != 0)) {
70 DRM_ERROR("Failed setting up TTM memory accounting\n"); 302 DRM_ERROR("Failed setting up TTM memory accounting\n");
71 dev_priv->ttm.mem_global_ref.release = NULL; 303 drm->ttm.mem_global_ref.release = NULL;
72 return ret; 304 return ret;
73 } 305 }
74 306
75 dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object; 307 drm->ttm.bo_global_ref.mem_glob = global_ref->object;
76 global_ref = &dev_priv->ttm.bo_global_ref.ref; 308 global_ref = &drm->ttm.bo_global_ref.ref;
77 global_ref->global_type = DRM_GLOBAL_TTM_BO; 309 global_ref->global_type = DRM_GLOBAL_TTM_BO;
78 global_ref->size = sizeof(struct ttm_bo_global); 310 global_ref->size = sizeof(struct ttm_bo_global);
79 global_ref->init = &ttm_bo_global_init; 311 global_ref->init = &ttm_bo_global_init;
@@ -82,8 +314,8 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
82 ret = drm_global_item_ref(global_ref); 314 ret = drm_global_item_ref(global_ref);
83 if (unlikely(ret != 0)) { 315 if (unlikely(ret != 0)) {
84 DRM_ERROR("Failed setting up TTM BO subsystem\n"); 316 DRM_ERROR("Failed setting up TTM BO subsystem\n");
85 drm_global_item_unref(&dev_priv->ttm.mem_global_ref); 317 drm_global_item_unref(&drm->ttm.mem_global_ref);
86 dev_priv->ttm.mem_global_ref.release = NULL; 318 drm->ttm.mem_global_ref.release = NULL;
87 return ret; 319 return ret;
88 } 320 }
89 321
@@ -91,13 +323,101 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
91} 323}
92 324
93void 325void
94nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv) 326nouveau_ttm_global_release(struct nouveau_drm *drm)
95{ 327{
96 if (dev_priv->ttm.mem_global_ref.release == NULL) 328 if (drm->ttm.mem_global_ref.release == NULL)
97 return; 329 return;
98 330
99 drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref); 331 drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
100 drm_global_item_unref(&dev_priv->ttm.mem_global_ref); 332 drm_global_item_unref(&drm->ttm.mem_global_ref);
101 dev_priv->ttm.mem_global_ref.release = NULL; 333 drm->ttm.mem_global_ref.release = NULL;
102} 334}
103 335
336int
337nouveau_ttm_init(struct nouveau_drm *drm)
338{
339 struct drm_device *dev = drm->dev;
340 u32 bits;
341 int ret;
342
343 bits = nouveau_vmmgr(drm->device)->dma_bits;
344 if ( drm->agp.stat == ENABLED ||
345 !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
346 bits = 32;
347
348 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
349 if (ret)
350 return ret;
351
352 ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
353 if (ret)
354 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
355
356 ret = nouveau_ttm_global_init(drm);
357 if (ret)
358 return ret;
359
360 ret = ttm_bo_device_init(&drm->ttm.bdev,
361 drm->ttm.bo_global_ref.ref.object,
362 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
363 bits <= 32 ? true : false);
364 if (ret) {
365 NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
366 return ret;
367 }
368
369 /* VRAM init */
370 drm->gem.vram_available = nouveau_fb(drm->device)->ram.size;
371 drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
372
373 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
374 drm->gem.vram_available >> PAGE_SHIFT);
375 if (ret) {
376 NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
377 return ret;
378 }
379
380 drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
381 pci_resource_len(dev->pdev, 1),
382 DRM_MTRR_WC);
383
384 /* GART init */
385 if (drm->agp.stat != ENABLED) {
386 drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
387 if (drm->gem.gart_available > 512 * 1024 * 1024)
388 drm->gem.gart_available = 512 * 1024 * 1024;
389 } else {
390 drm->gem.gart_available = drm->agp.size;
391 }
392
393 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
394 drm->gem.gart_available >> PAGE_SHIFT);
395 if (ret) {
396 NV_ERROR(drm, "GART mm init failed, %d\n", ret);
397 return ret;
398 }
399
400 NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
401 NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
402 return 0;
403}
404
405void
406nouveau_ttm_fini(struct nouveau_drm *drm)
407{
408 mutex_lock(&drm->dev->struct_mutex);
409 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
410 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
411 mutex_unlock(&drm->dev->struct_mutex);
412
413 ttm_bo_device_release(&drm->ttm.bdev);
414
415 nouveau_ttm_global_release(drm);
416
417 if (drm->ttm.mtrr >= 0) {
418 drm_mtrr_del(drm->ttm.mtrr,
419 pci_resource_start(drm->dev->pdev, 1),
420 pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
421 drm->ttm.mtrr = -1;
422 }
423}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
new file mode 100644
index 000000000000..25b0de413352
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -0,0 +1,25 @@
1#ifndef __NOUVEAU_TTM_H__
2#define __NOUVEAU_TTM_H__
3
4static inline struct nouveau_drm *
5nouveau_bdev(struct ttm_bo_device *bd)
6{
7 return container_of(bd, struct nouveau_drm, ttm.bdev);
8}
9
10extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
11extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
12extern const struct ttm_mem_type_manager_func nv04_gart_manager;
13
14struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
15 unsigned long size, u32 page_flags,
16 struct page *dummy_read_page);
17
18int nouveau_ttm_init(struct nouveau_drm *drm);
19void nouveau_ttm_fini(struct nouveau_drm *drm);
20int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
21
22int nouveau_ttm_global_init(struct nouveau_drm *);
23void nouveau_ttm_global_release(struct nouveau_drm *);
24
25#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
deleted file mode 100644
index b97719fbb739..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_util.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * Copyright (C) 2010 Nouveau Project
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#ifndef __NOUVEAU_UTIL_H__
29#define __NOUVEAU_UTIL_H__
30
31struct nouveau_bitfield {
32 u32 mask;
33 const char *name;
34};
35
36struct nouveau_enum {
37 u32 value;
38 const char *name;
39 void *data;
40};
41
42void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
43void nouveau_enum_print(const struct nouveau_enum *, u32 value);
44const struct nouveau_enum *
45nouveau_enum_find(const struct nouveau_enum *, u32 value);
46
47int nouveau_ratelimit(void);
48
49#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
new file mode 100644
index 000000000000..7bf7d131eee0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -0,0 +1,99 @@
1#include <linux/vgaarb.h>
2#include <linux/vga_switcheroo.h>
3
4#include "drmP.h"
5#include "drm_crtc_helper.h"
6
7#include "nouveau_drm.h"
8#include "nouveau_acpi.h"
9#include "nouveau_fbcon.h"
10#include "nouveau_vga.h"
11
12static unsigned int
13nouveau_vga_set_decode(void *priv, bool state)
14{
15 struct nouveau_device *device = nouveau_dev(priv);
16
17 if (device->chipset >= 0x40)
18 nv_wr32(device, 0x088054, state);
19 else
20 nv_wr32(device, 0x001854, state);
21
22 if (state)
23 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
24 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
25 else
26 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
27}
28
29static void
30nouveau_switcheroo_set_state(struct pci_dev *pdev,
31 enum vga_switcheroo_state state)
32{
33 struct drm_device *dev = pci_get_drvdata(pdev);
34 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
35
36 if (state == VGA_SWITCHEROO_ON) {
37 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
38 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
39 nouveau_drm_resume(pdev);
40 drm_kms_helper_poll_enable(dev);
41 dev->switch_power_state = DRM_SWITCH_POWER_ON;
42 } else {
43 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
44 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
45 drm_kms_helper_poll_disable(dev);
46 nouveau_switcheroo_optimus_dsm();
47 nouveau_drm_suspend(pdev, pmm);
48 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
49 }
50}
51
52static void
53nouveau_switcheroo_reprobe(struct pci_dev *pdev)
54{
55 struct drm_device *dev = pci_get_drvdata(pdev);
56 nouveau_fbcon_output_poll_changed(dev);
57}
58
59static bool
60nouveau_switcheroo_can_switch(struct pci_dev *pdev)
61{
62 struct drm_device *dev = pci_get_drvdata(pdev);
63 bool can_switch;
64
65 spin_lock(&dev->count_lock);
66 can_switch = (dev->open_count == 0);
67 spin_unlock(&dev->count_lock);
68 return can_switch;
69}
70
71static const struct vga_switcheroo_client_ops
72nouveau_switcheroo_ops = {
73 .set_gpu_state = nouveau_switcheroo_set_state,
74 .reprobe = nouveau_switcheroo_reprobe,
75 .can_switch = nouveau_switcheroo_can_switch,
76};
77
78void
79nouveau_vga_init(struct nouveau_drm *drm)
80{
81 struct drm_device *dev = drm->dev;
82 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
83 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
84}
85
86void
87nouveau_vga_fini(struct nouveau_drm *drm)
88{
89 struct drm_device *dev = drm->dev;
90 vga_switcheroo_unregister_client(dev->pdev);
91 vga_client_register(dev->pdev, NULL, NULL, NULL);
92}
93
94
95void
96nouveau_vga_lastclose(struct drm_device *dev)
97{
98 vga_switcheroo_process_delayed_switch();
99}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.h b/drivers/gpu/drm/nouveau/nouveau_vga.h
new file mode 100644
index 000000000000..ea3ad6974c65
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.h
@@ -0,0 +1,8 @@
1#ifndef __NOUVEAU_VGA_H__
2#define __NOUVEAU_VGA_H__
3
4void nouveau_vga_init(struct nouveau_drm *);
5void nouveau_vga_fini(struct nouveau_drm *);
6void nouveau_vga_lastclose(struct drm_device *dev);
7
8#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index b010cb997b34..c2cc8e2d6539 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -24,18 +24,21 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drm.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29#include "nouveau_gpio.h"
30 29
31static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 }; 30#include <subdev/bios/gpio.h>
31#include <subdev/gpio.h>
32
33static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
32static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]); 34static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
33 35
34int 36int
35nouveau_voltage_gpio_get(struct drm_device *dev) 37nouveau_voltage_gpio_get(struct drm_device *dev)
36{ 38{
37 struct drm_nouveau_private *dev_priv = dev->dev_private; 39 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
38 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; 40 struct nouveau_device *device = nouveau_dev(dev);
41 struct nouveau_gpio *gpio = nouveau_gpio(device);
39 u8 vid = 0; 42 u8 vid = 0;
40 int i; 43 int i;
41 44
@@ -43,7 +46,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
43 if (!(volt->vid_mask & (1 << i))) 46 if (!(volt->vid_mask & (1 << i)))
44 continue; 47 continue;
45 48
46 vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i; 49 vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i;
47 } 50 }
48 51
49 return nouveau_volt_lvl_lookup(dev, vid); 52 return nouveau_volt_lvl_lookup(dev, vid);
@@ -52,8 +55,9 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
52int 55int
53nouveau_voltage_gpio_set(struct drm_device *dev, int voltage) 56nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
54{ 57{
55 struct drm_nouveau_private *dev_priv = dev->dev_private; 58 struct nouveau_device *device = nouveau_dev(dev);
56 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; 59 struct nouveau_gpio *gpio = nouveau_gpio(device);
60 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
57 int vid, i; 61 int vid, i;
58 62
59 vid = nouveau_volt_vid_lookup(dev, voltage); 63 vid = nouveau_volt_vid_lookup(dev, voltage);
@@ -64,7 +68,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
64 if (!(volt->vid_mask & (1 << i))) 68 if (!(volt->vid_mask & (1 << i)))
65 continue; 69 continue;
66 70
67 nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i))); 71 gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i)));
68 } 72 }
69 73
70 return 0; 74 return 0;
@@ -73,8 +77,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
73int 77int
74nouveau_volt_vid_lookup(struct drm_device *dev, int voltage) 78nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
75{ 79{
76 struct drm_nouveau_private *dev_priv = dev->dev_private; 80 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
77 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
78 int i; 81 int i;
79 82
80 for (i = 0; i < volt->nr_level; i++) { 83 for (i = 0; i < volt->nr_level; i++) {
@@ -88,8 +91,7 @@ nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
88int 91int
89nouveau_volt_lvl_lookup(struct drm_device *dev, int vid) 92nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
90{ 93{
91 struct drm_nouveau_private *dev_priv = dev->dev_private; 94 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
92 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
93 int i; 95 int i;
94 96
95 for (i = 0; i < volt->nr_level; i++) { 97 for (i = 0; i < volt->nr_level; i++) {
@@ -103,10 +105,12 @@ nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
103void 105void
104nouveau_volt_init(struct drm_device *dev) 106nouveau_volt_init(struct drm_device *dev)
105{ 107{
106 struct drm_nouveau_private *dev_priv = dev->dev_private; 108 struct nouveau_drm *drm = nouveau_drm(dev);
107 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 109 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
110 struct nouveau_pm *pm = nouveau_pm(dev);
108 struct nouveau_pm_voltage *voltage = &pm->voltage; 111 struct nouveau_pm_voltage *voltage = &pm->voltage;
109 struct nvbios *bios = &dev_priv->vbios; 112 struct nvbios *bios = &drm->vbios;
113 struct dcb_gpio_func func;
110 struct bit_entry P; 114 struct bit_entry P;
111 u8 *volt = NULL, *entry; 115 u8 *volt = NULL, *entry;
112 int i, headerlen, recordlen, entries, vidmask, vidshift; 116 int i, headerlen, recordlen, entries, vidmask, vidshift;
@@ -121,11 +125,11 @@ nouveau_volt_init(struct drm_device *dev)
121 if (P.version == 2) 125 if (P.version == 2)
122 volt = ROMPTR(dev, P.data[12]); 126 volt = ROMPTR(dev, P.data[12]);
123 else { 127 else {
124 NV_WARN(dev, "unknown volt for BIT P %d\n", P.version); 128 NV_WARN(drm, "unknown volt for BIT P %d\n", P.version);
125 } 129 }
126 } else { 130 } else {
127 if (bios->data[bios->offset + 6] < 0x27) { 131 if (bios->data[bios->offset + 6] < 0x27) {
128 NV_DEBUG(dev, "BMP version too old for voltage\n"); 132 NV_DEBUG(drm, "BMP version too old for voltage\n");
129 return; 133 return;
130 } 134 }
131 135
@@ -133,7 +137,7 @@ nouveau_volt_init(struct drm_device *dev)
133 } 137 }
134 138
135 if (!volt) { 139 if (!volt) {
136 NV_DEBUG(dev, "voltage table pointer invalid\n"); 140 NV_DEBUG(drm, "voltage table pointer invalid\n");
137 return; 141 return;
138 } 142 }
139 143
@@ -177,7 +181,7 @@ nouveau_volt_init(struct drm_device *dev)
177 vidshift = 0; 181 vidshift = 0;
178 break; 182 break;
179 default: 183 default:
180 NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]); 184 NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]);
181 return; 185 return;
182 } 186 }
183 187
@@ -189,12 +193,12 @@ nouveau_volt_init(struct drm_device *dev)
189 i = 0; 193 i = 0;
190 while (vidmask) { 194 while (vidmask) {
191 if (i > nr_vidtag) { 195 if (i > nr_vidtag) {
192 NV_DEBUG(dev, "vid bit %d unknown\n", i); 196 NV_DEBUG(drm, "vid bit %d unknown\n", i);
193 return; 197 return;
194 } 198 }
195 199
196 if (!nouveau_gpio_func_valid(dev, vidtag[i])) { 200 if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) {
197 NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i); 201 NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i);
198 return; 202 return;
199 } 203 }
200 204
@@ -240,8 +244,7 @@ nouveau_volt_init(struct drm_device *dev)
240void 244void
241nouveau_volt_fini(struct drm_device *dev) 245nouveau_volt_fini(struct drm_device *dev)
242{ 246{
243 struct drm_nouveau_private *dev_priv = dev->dev_private; 247 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
244 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
245 248
246 kfree(volt->level); 249 kfree(volt->level);
247} 250}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 43accc11102f..8b8a9d3d9e8b 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -26,14 +26,20 @@
26#include "drmP.h" 26#include "drmP.h"
27#include "drm_crtc_helper.h" 27#include "drm_crtc_helper.h"
28 28
29#include "nouveau_drv.h" 29#include "nouveau_drm.h"
30#include "nouveau_reg.h"
31#include "nouveau_bo.h"
32#include "nouveau_gem.h"
30#include "nouveau_encoder.h" 33#include "nouveau_encoder.h"
31#include "nouveau_connector.h" 34#include "nouveau_connector.h"
32#include "nouveau_crtc.h" 35#include "nouveau_crtc.h"
33#include "nouveau_fb.h"
34#include "nouveau_hw.h" 36#include "nouveau_hw.h"
35#include "nvreg.h" 37#include "nvreg.h"
36#include "nouveau_fbcon.h" 38#include "nouveau_fbcon.h"
39#include "nv04_display.h"
40
41#include <subdev/bios/pll.h>
42#include <subdev/clock.h>
37 43
38static int 44static int
39nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 45nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -49,8 +55,8 @@ crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int in
49static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level) 55static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
50{ 56{
51 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 57 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
52 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 58 struct drm_device *dev = crtc->dev;
53 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 59 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
54 60
55 regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level; 61 regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
56 if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) { 62 if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
@@ -64,8 +70,8 @@ static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
64static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level) 70static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
65{ 71{
66 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 72 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
67 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 73 struct drm_device *dev = crtc->dev;
68 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 74 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
69 75
70 nv_crtc->sharpness = level; 76 nv_crtc->sharpness = level;
71 if (level < 0) /* blur is in hw range 0x3f -> 0x20 */ 77 if (level < 0) /* blur is in hw range 0x3f -> 0x20 */
@@ -103,14 +109,17 @@ static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
103static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock) 109static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
104{ 110{
105 struct drm_device *dev = crtc->dev; 111 struct drm_device *dev = crtc->dev;
106 struct drm_nouveau_private *dev_priv = dev->dev_private; 112 struct nouveau_drm *drm = nouveau_drm(dev);
113 struct nouveau_bios *bios = nouveau_bios(drm->device);
114 struct nouveau_clock *clk = nouveau_clock(drm->device);
107 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 115 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
108 struct nv04_mode_state *state = &dev_priv->mode_reg; 116 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
109 struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; 117 struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
110 struct nouveau_pll_vals *pv = &regp->pllvals; 118 struct nouveau_pll_vals *pv = &regp->pllvals;
111 struct pll_lims pll_lim; 119 struct nvbios_pll pll_lim;
112 120
113 if (get_pll_limits(dev, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0, &pll_lim)) 121 if (nvbios_pll_parse(bios, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0,
122 &pll_lim))
114 return; 123 return;
115 124
116 /* NM2 == 0 is used to determine single stage mode on two stage plls */ 125 /* NM2 == 0 is used to determine single stage mode on two stage plls */
@@ -126,28 +135,29 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
126 * has yet been observed in allowing the use a single stage pll on all 135 * has yet been observed in allowing the use a single stage pll on all
127 * nv43 however. the behaviour of single stage use is untested on nv40 136 * nv43 however. the behaviour of single stage use is untested on nv40
128 */ 137 */
129 if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2)) 138 if (nv_device(drm->device)->chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
130 memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); 139 memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
131 140
132 if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv)) 141
142 if (!clk->pll_calc(clk, &pll_lim, dot_clock, pv))
133 return; 143 return;
134 144
135 state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; 145 state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
136 146
137 /* The blob uses this always, so let's do the same */ 147 /* The blob uses this always, so let's do the same */
138 if (dev_priv->card_type == NV_40) 148 if (nv_device(drm->device)->card_type == NV_40)
139 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; 149 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
140 /* again nv40 and some nv43 act more like nv3x as described above */ 150 /* again nv40 and some nv43 act more like nv3x as described above */
141 if (dev_priv->chipset < 0x41) 151 if (nv_device(drm->device)->chipset < 0x41)
142 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | 152 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
143 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; 153 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
144 state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; 154 state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
145 155
146 if (pv->NM2) 156 if (pv->NM2)
147 NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n", 157 NV_DEBUG(drm, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
148 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P); 158 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
149 else 159 else
150 NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n", 160 NV_DEBUG(drm, "vpll: n %d m %d log2p %d\n",
151 pv->N1, pv->M1, pv->log2P); 161 pv->N1, pv->M1, pv->log2P);
152 162
153 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); 163 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
@@ -158,10 +168,11 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
158{ 168{
159 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 169 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
160 struct drm_device *dev = crtc->dev; 170 struct drm_device *dev = crtc->dev;
171 struct nouveau_drm *drm = nouveau_drm(dev);
161 unsigned char seq1 = 0, crtc17 = 0; 172 unsigned char seq1 = 0, crtc17 = 0;
162 unsigned char crtc1A; 173 unsigned char crtc1A;
163 174
164 NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode, 175 NV_DEBUG(drm, "Setting dpms mode %d on CRTC %d\n", mode,
165 nv_crtc->index); 176 nv_crtc->index);
166 177
167 if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */ 178 if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
@@ -225,9 +236,8 @@ static void
225nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) 236nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
226{ 237{
227 struct drm_device *dev = crtc->dev; 238 struct drm_device *dev = crtc->dev;
228 struct drm_nouveau_private *dev_priv = dev->dev_private;
229 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 239 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
230 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 240 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
231 struct drm_framebuffer *fb = crtc->fb; 241 struct drm_framebuffer *fb = crtc->fb;
232 242
233 /* Calculate our timings */ 243 /* Calculate our timings */
@@ -251,8 +261,8 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
251 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 261 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
252 262
253 if (encoder->crtc == crtc && 263 if (encoder->crtc == crtc &&
254 (nv_encoder->dcb->type == OUTPUT_LVDS || 264 (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
255 nv_encoder->dcb->type == OUTPUT_TMDS)) 265 nv_encoder->dcb->type == DCB_OUTPUT_TMDS))
256 fp_output = true; 266 fp_output = true;
257 } 267 }
258 268
@@ -264,7 +274,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
264 horizEnd = horizTotal - 2; 274 horizEnd = horizTotal - 2;
265 horizBlankEnd = horizTotal + 4; 275 horizBlankEnd = horizTotal + 4;
266#if 0 276#if 0
267 if (dev->overlayAdaptor && dev_priv->card_type >= NV_10) 277 if (dev->overlayAdaptor && nv_device(drm->device)->card_type >= NV_10)
268 /* This reportedly works around some video overlay bandwidth problems */ 278 /* This reportedly works around some video overlay bandwidth problems */
269 horizTotal += 2; 279 horizTotal += 2;
270#endif 280#endif
@@ -452,10 +462,10 @@ static void
452nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) 462nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
453{ 463{
454 struct drm_device *dev = crtc->dev; 464 struct drm_device *dev = crtc->dev;
455 struct drm_nouveau_private *dev_priv = dev->dev_private; 465 struct nouveau_drm *drm = nouveau_drm(dev);
456 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 466 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
457 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 467 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
458 struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index]; 468 struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
459 struct drm_encoder *encoder; 469 struct drm_encoder *encoder;
460 bool lvds_output = false, tmds_output = false, tv_output = false, 470 bool lvds_output = false, tmds_output = false, tv_output = false,
461 off_chip_digital = false; 471 off_chip_digital = false;
@@ -467,11 +477,11 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
467 if (encoder->crtc != crtc) 477 if (encoder->crtc != crtc)
468 continue; 478 continue;
469 479
470 if (nv_encoder->dcb->type == OUTPUT_LVDS) 480 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS)
471 digital = lvds_output = true; 481 digital = lvds_output = true;
472 if (nv_encoder->dcb->type == OUTPUT_TV) 482 if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
473 tv_output = true; 483 tv_output = true;
474 if (nv_encoder->dcb->type == OUTPUT_TMDS) 484 if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
475 digital = tmds_output = true; 485 digital = tmds_output = true;
476 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital) 486 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
477 off_chip_digital = true; 487 off_chip_digital = true;
@@ -500,7 +510,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
500 regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | 510 regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
501 NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | 511 NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
502 NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; 512 NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
503 if (dev_priv->chipset >= 0x11) 513 if (nv_device(drm->device)->chipset >= 0x11)
504 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; 514 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
505 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 515 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
506 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; 516 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
@@ -533,7 +543,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
533 543
534 /* The blob seems to take the current value from crtc 0, add 4 to that 544 /* The blob seems to take the current value from crtc 0, add 4 to that
535 * and reuse the old value for crtc 1 */ 545 * and reuse the old value for crtc 1 */
536 regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY]; 546 regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = nv04_display(dev)->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
537 if (!nv_crtc->index) 547 if (!nv_crtc->index)
538 regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4; 548 regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
539 549
@@ -541,26 +551,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
541 * 1 << 30 on 0x60.830), for no apparent reason */ 551 * 1 << 30 on 0x60.830), for no apparent reason */
542 regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; 552 regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
543 553
544 if (dev_priv->card_type >= NV_30) 554 if (nv_device(drm->device)->card_type >= NV_30)
545 regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1; 555 regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
546 556
547 regp->crtc_830 = mode->crtc_vdisplay - 3; 557 regp->crtc_830 = mode->crtc_vdisplay - 3;
548 regp->crtc_834 = mode->crtc_vdisplay - 1; 558 regp->crtc_834 = mode->crtc_vdisplay - 1;
549 559
550 if (dev_priv->card_type == NV_40) 560 if (nv_device(drm->device)->card_type == NV_40)
551 /* This is what the blob does */ 561 /* This is what the blob does */
552 regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); 562 regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
553 563
554 if (dev_priv->card_type >= NV_30) 564 if (nv_device(drm->device)->card_type >= NV_30)
555 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); 565 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
556 566
557 if (dev_priv->card_type >= NV_10) 567 if (nv_device(drm->device)->card_type >= NV_10)
558 regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; 568 regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
559 else 569 else
560 regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; 570 regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
561 571
562 /* Some misc regs */ 572 /* Some misc regs */
563 if (dev_priv->card_type == NV_40) { 573 if (nv_device(drm->device)->card_type == NV_40) {
564 regp->CRTC[NV_CIO_CRE_85] = 0xFF; 574 regp->CRTC[NV_CIO_CRE_85] = 0xFF;
565 regp->CRTC[NV_CIO_CRE_86] = 0x1; 575 regp->CRTC[NV_CIO_CRE_86] = 0x1;
566 } 576 }
@@ -572,7 +582,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
572 582
573 /* Generic PRAMDAC regs */ 583 /* Generic PRAMDAC regs */
574 584
575 if (dev_priv->card_type >= NV_10) 585 if (nv_device(drm->device)->card_type >= NV_10)
576 /* Only bit that bios and blob set. */ 586 /* Only bit that bios and blob set. */
577 regp->nv10_cursync = (1 << 25); 587 regp->nv10_cursync = (1 << 25);
578 588
@@ -581,7 +591,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
581 NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; 591 NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
582 if (crtc->fb->depth == 16) 592 if (crtc->fb->depth == 16)
583 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; 593 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
584 if (dev_priv->chipset >= 0x11) 594 if (nv_device(drm->device)->chipset >= 0x11)
585 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; 595 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
586 596
587 regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ 597 regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
@@ -611,9 +621,9 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
611{ 621{
612 struct drm_device *dev = crtc->dev; 622 struct drm_device *dev = crtc->dev;
613 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 623 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
614 struct drm_nouveau_private *dev_priv = dev->dev_private; 624 struct nouveau_drm *drm = nouveau_drm(dev);
615 625
616 NV_DEBUG_KMS(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index); 626 NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
617 drm_mode_debug_printmodeline(adjusted_mode); 627 drm_mode_debug_printmodeline(adjusted_mode);
618 628
619 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ 629 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
@@ -621,8 +631,8 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
621 631
622 nv_crtc_mode_set_vga(crtc, adjusted_mode); 632 nv_crtc_mode_set_vga(crtc, adjusted_mode);
623 /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ 633 /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
624 if (dev_priv->card_type == NV_40) 634 if (nv_device(drm->device)->card_type == NV_40)
625 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk); 635 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
626 nv_crtc_mode_set_regs(crtc, adjusted_mode); 636 nv_crtc_mode_set_regs(crtc, adjusted_mode);
627 nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); 637 nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
628 return 0; 638 return 0;
@@ -631,10 +641,10 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
631static void nv_crtc_save(struct drm_crtc *crtc) 641static void nv_crtc_save(struct drm_crtc *crtc)
632{ 642{
633 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 643 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
634 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 644 struct drm_device *dev = crtc->dev;
635 struct nv04_mode_state *state = &dev_priv->mode_reg; 645 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
636 struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index]; 646 struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
637 struct nv04_mode_state *saved = &dev_priv->saved_reg; 647 struct nv04_mode_state *saved = &nv04_display(dev)->saved_reg;
638 struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index]; 648 struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
639 649
640 if (nv_two_heads(crtc->dev)) 650 if (nv_two_heads(crtc->dev))
@@ -652,14 +662,14 @@ static void nv_crtc_save(struct drm_crtc *crtc)
652static void nv_crtc_restore(struct drm_crtc *crtc) 662static void nv_crtc_restore(struct drm_crtc *crtc)
653{ 663{
654 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 664 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
655 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 665 struct drm_device *dev = crtc->dev;
656 int head = nv_crtc->index; 666 int head = nv_crtc->index;
657 uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21]; 667 uint8_t saved_cr21 = nv04_display(dev)->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
658 668
659 if (nv_two_heads(crtc->dev)) 669 if (nv_two_heads(crtc->dev))
660 NVSetOwner(crtc->dev, head); 670 NVSetOwner(crtc->dev, head);
661 671
662 nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg); 672 nouveau_hw_load_state(crtc->dev, head, &nv04_display(dev)->saved_reg);
663 nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21); 673 nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
664 674
665 nv_crtc->last_dpms = NV_DPMS_CLEARED; 675 nv_crtc->last_dpms = NV_DPMS_CLEARED;
@@ -668,7 +678,7 @@ static void nv_crtc_restore(struct drm_crtc *crtc)
668static void nv_crtc_prepare(struct drm_crtc *crtc) 678static void nv_crtc_prepare(struct drm_crtc *crtc)
669{ 679{
670 struct drm_device *dev = crtc->dev; 680 struct drm_device *dev = crtc->dev;
671 struct drm_nouveau_private *dev_priv = dev->dev_private; 681 struct nouveau_drm *drm = nouveau_drm(dev);
672 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 682 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
673 struct drm_crtc_helper_funcs *funcs = crtc->helper_private; 683 struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
674 684
@@ -682,7 +692,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
682 692
683 /* Some more preparation. */ 693 /* Some more preparation. */
684 NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); 694 NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
685 if (dev_priv->card_type == NV_40) { 695 if (nv_device(drm->device)->card_type == NV_40) {
686 uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); 696 uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
687 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); 697 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
688 } 698 }
@@ -692,10 +702,9 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
692{ 702{
693 struct drm_device *dev = crtc->dev; 703 struct drm_device *dev = crtc->dev;
694 struct drm_crtc_helper_funcs *funcs = crtc->helper_private; 704 struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
695 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
696 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 705 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
697 706
698 nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg); 707 nouveau_hw_load_state(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
699 nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL); 708 nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
700 709
701#ifdef __BIG_ENDIAN 710#ifdef __BIG_ENDIAN
@@ -715,8 +724,6 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
715{ 724{
716 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 725 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
717 726
718 NV_DEBUG_KMS(crtc->dev, "\n");
719
720 if (!nv_crtc) 727 if (!nv_crtc)
721 return; 728 return;
722 729
@@ -732,18 +739,17 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
732{ 739{
733 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 740 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
734 struct drm_device *dev = nv_crtc->base.dev; 741 struct drm_device *dev = nv_crtc->base.dev;
735 struct drm_nouveau_private *dev_priv = dev->dev_private;
736 struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs; 742 struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
737 int i; 743 int i;
738 744
739 rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC; 745 rgbs = (struct rgb *)nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].DAC;
740 for (i = 0; i < 256; i++) { 746 for (i = 0; i < 256; i++) {
741 rgbs[i].r = nv_crtc->lut.r[i] >> 8; 747 rgbs[i].r = nv_crtc->lut.r[i] >> 8;
742 rgbs[i].g = nv_crtc->lut.g[i] >> 8; 748 rgbs[i].g = nv_crtc->lut.g[i] >> 8;
743 rgbs[i].b = nv_crtc->lut.b[i] >> 8; 749 rgbs[i].b = nv_crtc->lut.b[i] >> 8;
744 } 750 }
745 751
746 nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg); 752 nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
747} 753}
748 754
749static void 755static void
@@ -779,18 +785,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
779{ 785{
780 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 786 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
781 struct drm_device *dev = crtc->dev; 787 struct drm_device *dev = crtc->dev;
782 struct drm_nouveau_private *dev_priv = dev->dev_private; 788 struct nouveau_drm *drm = nouveau_drm(dev);
783 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 789 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
784 struct drm_framebuffer *drm_fb; 790 struct drm_framebuffer *drm_fb;
785 struct nouveau_framebuffer *fb; 791 struct nouveau_framebuffer *fb;
786 int arb_burst, arb_lwm; 792 int arb_burst, arb_lwm;
787 int ret; 793 int ret;
788 794
789 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 795 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
790 796
791 /* no fb bound */ 797 /* no fb bound */
792 if (!atomic && !crtc->fb) { 798 if (!atomic && !crtc->fb) {
793 NV_DEBUG_KMS(dev, "No FB bound\n"); 799 NV_DEBUG(drm, "No FB bound\n");
794 return 0; 800 return 0;
795 } 801 }
796 802
@@ -858,7 +864,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
858 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); 864 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
859 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); 865 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
860 866
861 if (dev_priv->card_type >= NV_20) { 867 if (nv_device(drm->device)->card_type >= NV_20) {
862 regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; 868 regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
863 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); 869 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
864 } 870 }
@@ -878,8 +884,8 @@ nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
878 struct drm_framebuffer *fb, 884 struct drm_framebuffer *fb,
879 int x, int y, enum mode_set_atomic state) 885 int x, int y, enum mode_set_atomic state)
880{ 886{
881 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 887 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
882 struct drm_device *dev = dev_priv->dev; 888 struct drm_device *dev = drm->dev;
883 889
884 if (state == ENTER_ATOMIC_MODE_SET) 890 if (state == ENTER_ATOMIC_MODE_SET)
885 nouveau_fbcon_save_disable_accel(dev); 891 nouveau_fbcon_save_disable_accel(dev);
@@ -934,9 +940,9 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
934 940
935#ifdef __BIG_ENDIAN 941#ifdef __BIG_ENDIAN
936 { 942 {
937 struct drm_nouveau_private *dev_priv = dev->dev_private; 943 struct nouveau_drm *drm = nouveau_drm(dev);
938 944
939 if (dev_priv->chipset == 0x11) { 945 if (nv_device(drm->device)->chipset == 0x11) {
940 pixel = ((pixel & 0x000000ff) << 24) | 946 pixel = ((pixel & 0x000000ff) << 24) |
941 ((pixel & 0x0000ff00) << 8) | 947 ((pixel & 0x0000ff00) << 8) |
942 ((pixel & 0x00ff0000) >> 8) | 948 ((pixel & 0x00ff0000) >> 8) |
@@ -953,8 +959,8 @@ static int
953nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 959nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
954 uint32_t buffer_handle, uint32_t width, uint32_t height) 960 uint32_t buffer_handle, uint32_t width, uint32_t height)
955{ 961{
956 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 962 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
957 struct drm_device *dev = dev_priv->dev; 963 struct drm_device *dev = drm->dev;
958 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 964 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
959 struct nouveau_bo *cursor = NULL; 965 struct nouveau_bo *cursor = NULL;
960 struct drm_gem_object *gem; 966 struct drm_gem_object *gem;
@@ -977,7 +983,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
977 if (ret) 983 if (ret)
978 goto out; 984 goto out;
979 985
980 if (dev_priv->chipset >= 0x11) 986 if (nv_device(drm->device)->chipset >= 0x11)
981 nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); 987 nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
982 else 988 else
983 nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); 989 nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
index aaf3de3bc816..d2ea8b460364 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -1,7 +1,7 @@
1#include "drmP.h" 1#include "drmP.h"
2#include "drm_mode.h" 2#include "drm_mode.h"
3#include "nouveau_drm.h"
3#include "nouveau_reg.h" 4#include "nouveau_reg.h"
4#include "nouveau_drv.h"
5#include "nouveau_crtc.h" 5#include "nouveau_crtc.h"
6#include "nouveau_hw.h" 6#include "nouveau_hw.h"
7 7
@@ -38,8 +38,8 @@ static void
38nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) 38nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
39{ 39{
40 struct drm_device *dev = nv_crtc->base.dev; 40 struct drm_device *dev = nv_crtc->base.dev;
41 struct drm_nouveau_private *dev_priv = dev->dev_private; 41 struct nouveau_drm *drm = nouveau_drm(dev);
42 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 42 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
43 struct drm_crtc *crtc = &nv_crtc->base; 43 struct drm_crtc *crtc = &nv_crtc->base;
44 44
45 regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] = 45 regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
@@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
55 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); 55 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
56 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); 56 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
57 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 57 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
58 if (dev_priv->card_type == NV_40) 58 if (nv_device(drm->device)->card_type == NV_40)
59 nv_fix_nv40_hw_cursor(dev, nv_crtc->index); 59 nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
60} 60}
61 61
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 38f19479417c..336f953084f9 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -27,22 +27,25 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm_crtc_helper.h" 28#include "drm_crtc_helper.h"
29 29
30#include "nouveau_drv.h" 30#include "nouveau_drm.h"
31#include "nouveau_encoder.h" 31#include "nouveau_encoder.h"
32#include "nouveau_connector.h" 32#include "nouveau_connector.h"
33#include "nouveau_crtc.h" 33#include "nouveau_crtc.h"
34#include "nouveau_hw.h" 34#include "nouveau_hw.h"
35#include "nouveau_gpio.h"
36#include "nvreg.h" 35#include "nvreg.h"
37 36
37#include <subdev/bios/gpio.h>
38#include <subdev/gpio.h>
39#include <subdev/timer.h>
40
38int nv04_dac_output_offset(struct drm_encoder *encoder) 41int nv04_dac_output_offset(struct drm_encoder *encoder)
39{ 42{
40 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 43 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
41 int offset = 0; 44 int offset = 0;
42 45
43 if (dcb->or & (8 | OUTPUT_C)) 46 if (dcb->or & (8 | DCB_OUTPUT_C))
44 offset += 0x68; 47 offset += 0x68;
45 if (dcb->or & (8 | OUTPUT_B)) 48 if (dcb->or & (8 | DCB_OUTPUT_B))
46 offset += 0x2000; 49 offset += 0x2000;
47 50
48 return offset; 51 return offset;
@@ -62,6 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
62 65
63static int sample_load_twice(struct drm_device *dev, bool sense[2]) 66static int sample_load_twice(struct drm_device *dev, bool sense[2])
64{ 67{
68 struct nouveau_device *device = nouveau_dev(dev);
69 struct nouveau_timer *ptimer = nouveau_timer(device);
65 int i; 70 int i;
66 71
67 for (i = 0; i < 2; i++) { 72 for (i = 0; i < 2; i++) {
@@ -75,27 +80,30 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
75 * use a 10ms timeout (guards against crtc being inactive, in 80 * use a 10ms timeout (guards against crtc being inactive, in
76 * which case blank state would never change) 81 * which case blank state would never change)
77 */ 82 */
78 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, 83 if (!nouveau_timer_wait_eq(ptimer, 10000000,
79 0x00000001, 0x00000000)) 84 NV_PRMCIO_INP0__COLOR,
85 0x00000001, 0x00000000))
80 return -EBUSY; 86 return -EBUSY;
81 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, 87 if (!nouveau_timer_wait_eq(ptimer, 10000000,
82 0x00000001, 0x00000001)) 88 NV_PRMCIO_INP0__COLOR,
89 0x00000001, 0x00000001))
83 return -EBUSY; 90 return -EBUSY;
84 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, 91 if (!nouveau_timer_wait_eq(ptimer, 10000000,
85 0x00000001, 0x00000000)) 92 NV_PRMCIO_INP0__COLOR,
93 0x00000001, 0x00000000))
86 return -EBUSY; 94 return -EBUSY;
87 95
88 udelay(100); 96 udelay(100);
89 /* when level triggers, sense is _LO_ */ 97 /* when level triggers, sense is _LO_ */
90 sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10; 98 sense_a = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
91 99
92 /* take another reading until it agrees with sense_a... */ 100 /* take another reading until it agrees with sense_a... */
93 do { 101 do {
94 udelay(100); 102 udelay(100);
95 sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10; 103 sense_b = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
96 if (sense_a != sense_b) { 104 if (sense_a != sense_b) {
97 sense_b_prime = 105 sense_b_prime =
98 nv_rd08(dev, NV_PRMCIO_INP0) & 0x10; 106 nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
99 if (sense_b == sense_b_prime) { 107 if (sense_b == sense_b_prime) {
100 /* ... unless two consecutive subsequent 108 /* ... unless two consecutive subsequent
101 * samples agree; sense_a is replaced */ 109 * samples agree; sense_a is replaced */
@@ -120,6 +128,8 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
120 struct drm_connector *connector) 128 struct drm_connector *connector)
121{ 129{
122 struct drm_device *dev = encoder->dev; 130 struct drm_device *dev = encoder->dev;
131 struct nouveau_device *device = nouveau_dev(dev);
132 struct nouveau_drm *drm = nouveau_drm(dev);
123 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; 133 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
124 uint8_t saved_palette0[3], saved_palette_mask; 134 uint8_t saved_palette0[3], saved_palette_mask;
125 uint32_t saved_rtest_ctrl, saved_rgen_ctrl; 135 uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
@@ -154,11 +164,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
154 saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX); 164 saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
155 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0); 165 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
156 166
157 nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0); 167 nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
158 for (i = 0; i < 3; i++) 168 for (i = 0; i < 3; i++)
159 saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA); 169 saved_palette0[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA);
160 saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK); 170 saved_palette_mask = nv_rd08(device, NV_PRMDIO_PIXEL_MASK);
161 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0); 171 nv_wr08(device, NV_PRMDIO_PIXEL_MASK, 0);
162 172
163 saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL); 173 saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
164 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, 174 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
@@ -171,11 +181,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
171 do { 181 do {
172 bool sense_pair[2]; 182 bool sense_pair[2];
173 183
174 nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); 184 nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
175 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0); 185 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
176 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0); 186 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
177 /* testing blue won't find monochrome monitors. I don't care */ 187 /* testing blue won't find monochrome monitors. I don't care */
178 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue); 188 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, blue);
179 189
180 i = 0; 190 i = 0;
181 /* take sample pairs until both samples in the pair agree */ 191 /* take sample pairs until both samples in the pair agree */
@@ -198,11 +208,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
198 } while (++blue < 0x18 && sense); 208 } while (++blue < 0x18 && sense);
199 209
200out: 210out:
201 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask); 211 nv_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
202 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl); 212 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
203 nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); 213 nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
204 for (i = 0; i < 3; i++) 214 for (i = 0; i < 3; i++)
205 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]); 215 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
206 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl); 216 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
207 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); 217 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
208 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); 218 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
@@ -210,7 +220,7 @@ out:
210 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); 220 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
211 221
212 if (blue == 0x18) { 222 if (blue == 0x18) {
213 NV_INFO(dev, "Load detected on head A\n"); 223 NV_INFO(drm, "Load detected on head A\n");
214 return connector_status_connected; 224 return connector_status_connected;
215 } 225 }
216 226
@@ -220,43 +230,46 @@ out:
220uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) 230uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
221{ 231{
222 struct drm_device *dev = encoder->dev; 232 struct drm_device *dev = encoder->dev;
223 struct drm_nouveau_private *dev_priv = dev->dev_private; 233 struct nouveau_drm *drm = nouveau_drm(dev);
224 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 234 struct nouveau_device *device = nouveau_dev(dev);
235 struct nouveau_gpio *gpio = nouveau_gpio(device);
236 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
225 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); 237 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
226 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, 238 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
227 saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput; 239 saved_rtest_ctrl, saved_gpio0 = 0, saved_gpio1 = 0, temp, routput;
228 int head; 240 int head;
229 241
230#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) 242#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
231 if (dcb->type == OUTPUT_TV) { 243 if (dcb->type == DCB_OUTPUT_TV) {
232 testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0); 244 testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
233 245
234 if (dev_priv->vbios.tvdactestval) 246 if (drm->vbios.tvdactestval)
235 testval = dev_priv->vbios.tvdactestval; 247 testval = drm->vbios.tvdactestval;
236 } else { 248 } else {
237 testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */ 249 testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
238 250
239 if (dev_priv->vbios.dactestval) 251 if (drm->vbios.dactestval)
240 testval = dev_priv->vbios.dactestval; 252 testval = drm->vbios.dactestval;
241 } 253 }
242 254
243 saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); 255 saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
244 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 256 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
245 saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF); 257 saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
246 258
247 saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2); 259 saved_powerctrl_2 = nv_rd32(device, NV_PBUS_POWERCTRL_2);
248 260
249 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff); 261 nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
250 if (regoffset == 0x68) { 262 if (regoffset == 0x68) {
251 saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4); 263 saved_powerctrl_4 = nv_rd32(device, NV_PBUS_POWERCTRL_4);
252 nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); 264 nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
253 } 265 }
254 266
255 saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1); 267 if (gpio) {
256 saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0); 268 saved_gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
257 269 saved_gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
258 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV); 270 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
259 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV); 271 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
272 }
260 273
261 msleep(4); 274 msleep(4);
262 275
@@ -270,8 +283,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
270 /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ 283 /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
271 routput = (saved_routput & 0xfffffece) | head << 8; 284 routput = (saved_routput & 0xfffffece) | head << 8;
272 285
273 if (dev_priv->card_type >= NV_40) { 286 if (nv_device(drm->device)->card_type >= NV_40) {
274 if (dcb->type == OUTPUT_TV) 287 if (dcb->type == DCB_OUTPUT_TV)
275 routput |= 0x1a << 16; 288 routput |= 0x1a << 16;
276 else 289 else
277 routput &= ~(0x1a << 16); 290 routput &= ~(0x1a << 16);
@@ -303,11 +316,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
303 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput); 316 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
304 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl); 317 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
305 if (regoffset == 0x68) 318 if (regoffset == 0x68)
306 nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); 319 nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
307 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); 320 nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
308 321
309 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1); 322 if (gpio) {
310 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0); 323 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
324 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
325 }
311 326
312 return sample; 327 return sample;
313} 328}
@@ -315,15 +330,15 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
315static enum drm_connector_status 330static enum drm_connector_status
316nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) 331nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
317{ 332{
318 struct drm_device *dev = encoder->dev; 333 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
319 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 334 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
320 335
321 if (nv04_dac_in_use(encoder)) 336 if (nv04_dac_in_use(encoder))
322 return connector_status_disconnected; 337 return connector_status_disconnected;
323 338
324 if (nv17_dac_sample_load(encoder) & 339 if (nv17_dac_sample_load(encoder) &
325 NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { 340 NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
326 NV_INFO(dev, "Load detected on output %c\n", 341 NV_INFO(drm, "Load detected on output %c\n",
327 '@' + ffs(dcb->or)); 342 '@' + ffs(dcb->or));
328 return connector_status_connected; 343 return connector_status_connected;
329 } else { 344 } else {
@@ -357,7 +372,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
357 struct drm_display_mode *adjusted_mode) 372 struct drm_display_mode *adjusted_mode)
358{ 373{
359 struct drm_device *dev = encoder->dev; 374 struct drm_device *dev = encoder->dev;
360 struct drm_nouveau_private *dev_priv = dev->dev_private; 375 struct nouveau_drm *drm = nouveau_drm(dev);
361 int head = nouveau_crtc(encoder->crtc)->index; 376 int head = nouveau_crtc(encoder->crtc)->index;
362 377
363 if (nv_gf4_disp_arch(dev)) { 378 if (nv_gf4_disp_arch(dev)) {
@@ -372,7 +387,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
372 /* force any other vga encoders to bind to the other crtc */ 387 /* force any other vga encoders to bind to the other crtc */
373 list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) { 388 list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
374 if (rebind == encoder 389 if (rebind == encoder
375 || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG) 390 || nouveau_encoder(rebind)->dcb->type != DCB_OUTPUT_ANALOG)
376 continue; 391 continue;
377 392
378 dac_offset = nv04_dac_output_offset(rebind); 393 dac_offset = nv04_dac_output_offset(rebind);
@@ -383,7 +398,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
383 } 398 }
384 399
385 /* This could use refinement for flatpanels, but it should work this way */ 400 /* This could use refinement for flatpanels, but it should work this way */
386 if (dev_priv->chipset < 0x44) 401 if (nv_device(drm->device)->chipset < 0x44)
387 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); 402 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
388 else 403 else
389 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); 404 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -392,13 +407,13 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
392static void nv04_dac_commit(struct drm_encoder *encoder) 407static void nv04_dac_commit(struct drm_encoder *encoder)
393{ 408{
394 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 409 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
395 struct drm_device *dev = encoder->dev; 410 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
396 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 411 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
397 struct drm_encoder_helper_funcs *helper = encoder->helper_private; 412 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
398 413
399 helper->dpms(encoder, DRM_MODE_DPMS_ON); 414 helper->dpms(encoder, DRM_MODE_DPMS_ON);
400 415
401 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", 416 NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
402 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), 417 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
403 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 418 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
404} 419}
@@ -406,11 +421,10 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
406void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable) 421void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
407{ 422{
408 struct drm_device *dev = encoder->dev; 423 struct drm_device *dev = encoder->dev;
409 struct drm_nouveau_private *dev_priv = dev->dev_private; 424 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
410 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
411 425
412 if (nv_gf4_disp_arch(dev)) { 426 if (nv_gf4_disp_arch(dev)) {
413 uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1]; 427 uint32_t *dac_users = &nv04_display(dev)->dac_users[ffs(dcb->or) - 1];
414 int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder); 428 int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
415 uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off); 429 uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
416 430
@@ -431,23 +445,23 @@ void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
431 * someone else. */ 445 * someone else. */
432bool nv04_dac_in_use(struct drm_encoder *encoder) 446bool nv04_dac_in_use(struct drm_encoder *encoder)
433{ 447{
434 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; 448 struct drm_device *dev = encoder->dev;
435 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 449 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
436 450
437 return nv_gf4_disp_arch(encoder->dev) && 451 return nv_gf4_disp_arch(encoder->dev) &&
438 (dev_priv->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index)); 452 (nv04_display(dev)->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index));
439} 453}
440 454
441static void nv04_dac_dpms(struct drm_encoder *encoder, int mode) 455static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
442{ 456{
443 struct drm_device *dev = encoder->dev;
444 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 457 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
458 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
445 459
446 if (nv_encoder->last_dpms == mode) 460 if (nv_encoder->last_dpms == mode)
447 return; 461 return;
448 nv_encoder->last_dpms = mode; 462 nv_encoder->last_dpms = mode;
449 463
450 NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n", 464 NV_INFO(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
451 mode, nv_encoder->dcb->index); 465 mode, nv_encoder->dcb->index);
452 466
453 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); 467 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
@@ -479,8 +493,6 @@ static void nv04_dac_destroy(struct drm_encoder *encoder)
479{ 493{
480 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 494 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
481 495
482 NV_DEBUG_KMS(encoder->dev, "\n");
483
484 drm_encoder_cleanup(encoder); 496 drm_encoder_cleanup(encoder);
485 kfree(nv_encoder); 497 kfree(nv_encoder);
486} 498}
@@ -512,7 +524,7 @@ static const struct drm_encoder_funcs nv04_dac_funcs = {
512}; 524};
513 525
514int 526int
515nv04_dac_create(struct drm_connector *connector, struct dcb_entry *entry) 527nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
516{ 528{
517 const struct drm_encoder_helper_funcs *helper; 529 const struct drm_encoder_helper_funcs *helper;
518 struct nouveau_encoder *nv_encoder = NULL; 530 struct nouveau_encoder *nv_encoder = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index c2675623b7cd..e53df742cc01 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -27,7 +27,8 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm_crtc_helper.h" 28#include "drm_crtc_helper.h"
29 29
30#include "nouveau_drv.h" 30#include "nouveau_drm.h"
31#include "nouveau_reg.h"
31#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
32#include "nouveau_connector.h" 33#include "nouveau_connector.h"
33#include "nouveau_crtc.h" 34#include "nouveau_crtc.h"
@@ -36,6 +37,8 @@
36 37
37#include "i2c/sil164.h" 38#include "i2c/sil164.h"
38 39
40#include <subdev/i2c.h>
41
39#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \ 42#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \
40 NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \ 43 NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \
41 NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS) 44 NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
@@ -49,20 +52,20 @@ static inline bool is_fpc_off(uint32_t fpc)
49 FP_TG_CONTROL_OFF); 52 FP_TG_CONTROL_OFF);
50} 53}
51 54
52int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent) 55int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent)
53{ 56{
54 /* special case of nv_read_tmds to find crtc associated with an output. 57 /* special case of nv_read_tmds to find crtc associated with an output.
55 * this does not give a correct answer for off-chip dvi, but there's no 58 * this does not give a correct answer for off-chip dvi, but there's no
56 * use for such an answer anyway 59 * use for such an answer anyway
57 */ 60 */
58 int ramdac = (dcbent->or & OUTPUT_C) >> 2; 61 int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2;
59 62
60 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL, 63 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
61 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4); 64 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
62 return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac; 65 return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
63} 66}
64 67
65void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent, 68void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent,
66 int head, bool dl) 69 int head, bool dl)
67{ 70{
68 /* The BIOS scripts don't do this for us, sadly 71 /* The BIOS scripts don't do this for us, sadly
@@ -72,13 +75,13 @@ void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
72 * (for VT restore etc.) 75 * (for VT restore etc.)
73 */ 76 */
74 77
75 int ramdac = (dcbent->or & OUTPUT_C) >> 2; 78 int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2;
76 uint8_t tmds04 = 0x80; 79 uint8_t tmds04 = 0x80;
77 80
78 if (head != ramdac) 81 if (head != ramdac)
79 tmds04 = 0x88; 82 tmds04 = 0x88;
80 83
81 if (dcbent->type == OUTPUT_LVDS) 84 if (dcbent->type == DCB_OUTPUT_LVDS)
82 tmds04 |= 0x01; 85 tmds04 |= 0x01;
83 86
84 nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04); 87 nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);
@@ -89,8 +92,7 @@ void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
89 92
90void nv04_dfp_disable(struct drm_device *dev, int head) 93void nv04_dfp_disable(struct drm_device *dev, int head)
91{ 94{
92 struct drm_nouveau_private *dev_priv = dev->dev_private; 95 struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
93 struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
94 96
95 if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) & 97 if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
96 FP_TG_CONTROL_ON) { 98 FP_TG_CONTROL_ON) {
@@ -111,14 +113,13 @@ void nv04_dfp_disable(struct drm_device *dev, int head)
111void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode) 113void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
112{ 114{
113 struct drm_device *dev = encoder->dev; 115 struct drm_device *dev = encoder->dev;
114 struct drm_nouveau_private *dev_priv = dev->dev_private;
115 struct drm_crtc *crtc; 116 struct drm_crtc *crtc;
116 struct nouveau_crtc *nv_crtc; 117 struct nouveau_crtc *nv_crtc;
117 uint32_t *fpc; 118 uint32_t *fpc;
118 119
119 if (mode == DRM_MODE_DPMS_ON) { 120 if (mode == DRM_MODE_DPMS_ON) {
120 nv_crtc = nouveau_crtc(encoder->crtc); 121 nv_crtc = nouveau_crtc(encoder->crtc);
121 fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control; 122 fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control;
122 123
123 if (is_fpc_off(*fpc)) { 124 if (is_fpc_off(*fpc)) {
124 /* using saved value is ok, as (is_digital && dpms_on && 125 /* using saved value is ok, as (is_digital && dpms_on &&
@@ -133,7 +134,7 @@ void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
133 } else { 134 } else {
134 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 135 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
135 nv_crtc = nouveau_crtc(crtc); 136 nv_crtc = nouveau_crtc(crtc);
136 fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control; 137 fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control;
137 138
138 nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index); 139 nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
139 if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) { 140 if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
@@ -151,10 +152,10 @@ void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
151static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder) 152static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
152{ 153{
153 struct drm_device *dev = encoder->dev; 154 struct drm_device *dev = encoder->dev;
154 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 155 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
155 struct drm_encoder *slave; 156 struct drm_encoder *slave;
156 157
157 if (dcb->type != OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP) 158 if (dcb->type != DCB_OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP)
158 return NULL; 159 return NULL;
159 160
160 /* Some BIOSes (e.g. the one in a Quadro FX1000) report several 161 /* Some BIOSes (e.g. the one in a Quadro FX1000) report several
@@ -168,9 +169,9 @@ static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
168 * let's do the same. 169 * let's do the same.
169 */ 170 */
170 list_for_each_entry(slave, &dev->mode_config.encoder_list, head) { 171 list_for_each_entry(slave, &dev->mode_config.encoder_list, head) {
171 struct dcb_entry *slave_dcb = nouveau_encoder(slave)->dcb; 172 struct dcb_output *slave_dcb = nouveau_encoder(slave)->dcb;
172 173
173 if (slave_dcb->type == OUTPUT_TMDS && get_slave_funcs(slave) && 174 if (slave_dcb->type == DCB_OUTPUT_TMDS && get_slave_funcs(slave) &&
174 slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr) 175 slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr)
175 return slave; 176 return slave;
176 } 177 }
@@ -202,9 +203,8 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
202static void nv04_dfp_prepare_sel_clk(struct drm_device *dev, 203static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
203 struct nouveau_encoder *nv_encoder, int head) 204 struct nouveau_encoder *nv_encoder, int head)
204{ 205{
205 struct drm_nouveau_private *dev_priv = dev->dev_private; 206 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
206 struct nv04_mode_state *state = &dev_priv->mode_reg; 207 uint32_t bits1618 = nv_encoder->dcb->or & DCB_OUTPUT_A ? 0x10000 : 0x40000;
207 uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 0x10000 : 0x40000;
208 208
209 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP) 209 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
210 return; 210 return;
@@ -233,8 +233,8 @@ static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
233 * and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table 233 * and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
234 * entry has the necessary info) 234 * entry has the necessary info)
235 */ 235 */
236 if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) { 236 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS && nv04_display(dev)->saved_reg.sel_clk & 0xf0) {
237 int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1; 237 int shift = (nv04_display(dev)->saved_reg.sel_clk & 0x50) ? 0 : 1;
238 238
239 state->sel_clk &= ~0xf0; 239 state->sel_clk &= ~0xf0;
240 state->sel_clk |= (head ? 0x40 : 0x10) << shift; 240 state->sel_clk |= (head ? 0x40 : 0x10) << shift;
@@ -246,9 +246,8 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder)
246 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 246 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
247 struct drm_encoder_helper_funcs *helper = encoder->helper_private; 247 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
248 struct drm_device *dev = encoder->dev; 248 struct drm_device *dev = encoder->dev;
249 struct drm_nouveau_private *dev_priv = dev->dev_private;
250 int head = nouveau_crtc(encoder->crtc)->index; 249 int head = nouveau_crtc(encoder->crtc)->index;
251 struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg; 250 struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
252 uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX]; 251 uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
253 uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX]; 252 uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
254 253
@@ -263,7 +262,7 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder)
263 *cr_lcd |= head ? 0x0 : 0x8; 262 *cr_lcd |= head ? 0x0 : 0x8;
264 else { 263 else {
265 *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30; 264 *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
266 if (nv_encoder->dcb->type == OUTPUT_LVDS) 265 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS)
267 *cr_lcd |= 0x30; 266 *cr_lcd |= 0x30;
268 if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) { 267 if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
269 /* avoid being connected to both crtcs */ 268 /* avoid being connected to both crtcs */
@@ -282,17 +281,18 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
282 struct drm_display_mode *adjusted_mode) 281 struct drm_display_mode *adjusted_mode)
283{ 282{
284 struct drm_device *dev = encoder->dev; 283 struct drm_device *dev = encoder->dev;
285 struct drm_nouveau_private *dev_priv = dev->dev_private; 284 struct nouveau_device *device = nouveau_dev(dev);
285 struct nouveau_drm *drm = nouveau_drm(dev);
286 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 286 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
287 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 287 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
288 struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index]; 288 struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
289 struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc); 289 struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
290 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 290 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
291 struct drm_display_mode *output_mode = &nv_encoder->mode; 291 struct drm_display_mode *output_mode = &nv_encoder->mode;
292 struct drm_connector *connector = &nv_connector->base; 292 struct drm_connector *connector = &nv_connector->base;
293 uint32_t mode_ratio, panel_ratio; 293 uint32_t mode_ratio, panel_ratio;
294 294
295 NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index); 295 NV_DEBUG(drm, "Output mode on CRTC %d:\n", nv_crtc->index);
296 drm_mode_debug_printmodeline(output_mode); 296 drm_mode_debug_printmodeline(output_mode);
297 297
298 /* Initialize the FP registers in this CRTC. */ 298 /* Initialize the FP registers in this CRTC. */
@@ -300,10 +300,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
300 regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; 300 regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
301 if (!nv_gf4_disp_arch(dev) || 301 if (!nv_gf4_disp_arch(dev) ||
302 (output_mode->hsync_start - output_mode->hdisplay) >= 302 (output_mode->hsync_start - output_mode->hdisplay) >=
303 dev_priv->vbios.digital_min_front_porch) 303 drm->vbios.digital_min_front_porch)
304 regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay; 304 regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
305 else 305 else
306 regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1; 306 regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - drm->vbios.digital_min_front_porch - 1;
307 regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1; 307 regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
308 regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; 308 regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
309 regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew; 309 regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
@@ -335,12 +335,12 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
335 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE; 335 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
336 else /* gpu needs to scale */ 336 else /* gpu needs to scale */
337 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE; 337 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
338 if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT) 338 if (nv_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
339 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; 339 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
340 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && 340 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
341 output_mode->clock > 165000) 341 output_mode->clock > 165000)
342 regp->fp_control |= (2 << 24); 342 regp->fp_control |= (2 << 24);
343 if (nv_encoder->dcb->type == OUTPUT_LVDS) { 343 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
344 bool duallink = false, dummy; 344 bool duallink = false, dummy;
345 if (nv_connector->edid && 345 if (nv_connector->edid &&
346 nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { 346 nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
@@ -416,7 +416,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
416 if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || 416 if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
417 (nv_connector->dithering_mode == DITHERING_MODE_AUTO && 417 (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
418 encoder->crtc->fb->depth > connector->display_info.bpc * 3)) { 418 encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
419 if (dev_priv->chipset == 0x11) 419 if (nv_device(drm->device)->chipset == 0x11)
420 regp->dither = savep->dither | 0x00010000; 420 regp->dither = savep->dither | 0x00010000;
421 else { 421 else {
422 int i; 422 int i;
@@ -427,7 +427,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
427 } 427 }
428 } 428 }
429 } else { 429 } else {
430 if (dev_priv->chipset != 0x11) { 430 if (nv_device(drm->device)->chipset != 0x11) {
431 /* reset them */ 431 /* reset them */
432 int i; 432 int i;
433 for (i = 0; i < 3; i++) { 433 for (i = 0; i < 3; i++) {
@@ -444,26 +444,26 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
444static void nv04_dfp_commit(struct drm_encoder *encoder) 444static void nv04_dfp_commit(struct drm_encoder *encoder)
445{ 445{
446 struct drm_device *dev = encoder->dev; 446 struct drm_device *dev = encoder->dev;
447 struct drm_nouveau_private *dev_priv = dev->dev_private; 447 struct nouveau_drm *drm = nouveau_drm(dev);
448 struct drm_encoder_helper_funcs *helper = encoder->helper_private; 448 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
449 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 449 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
450 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 450 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
451 struct dcb_entry *dcbe = nv_encoder->dcb; 451 struct dcb_output *dcbe = nv_encoder->dcb;
452 int head = nouveau_crtc(encoder->crtc)->index; 452 int head = nouveau_crtc(encoder->crtc)->index;
453 struct drm_encoder *slave_encoder; 453 struct drm_encoder *slave_encoder;
454 454
455 if (dcbe->type == OUTPUT_TMDS) 455 if (dcbe->type == DCB_OUTPUT_TMDS)
456 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); 456 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
457 else if (dcbe->type == OUTPUT_LVDS) 457 else if (dcbe->type == DCB_OUTPUT_LVDS)
458 call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock); 458 call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
459 459
460 /* update fp_control state for any changes made by scripts, 460 /* update fp_control state for any changes made by scripts,
461 * so correct value is written at DPMS on */ 461 * so correct value is written at DPMS on */
462 dev_priv->mode_reg.crtc_reg[head].fp_control = 462 nv04_display(dev)->mode_reg.crtc_reg[head].fp_control =
463 NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); 463 NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
464 464
465 /* This could use refinement for flatpanels, but it should work this way */ 465 /* This could use refinement for flatpanels, but it should work this way */
466 if (dev_priv->chipset < 0x44) 466 if (nv_device(drm->device)->chipset < 0x44)
467 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); 467 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
468 else 468 else
469 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); 469 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -476,7 +476,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
476 476
477 helper->dpms(encoder, DRM_MODE_DPMS_ON); 477 helper->dpms(encoder, DRM_MODE_DPMS_ON);
478 478
479 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", 479 NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
480 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), 480 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
481 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 481 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
482} 482}
@@ -485,6 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
485{ 485{
486#ifdef __powerpc__ 486#ifdef __powerpc__
487 struct drm_device *dev = encoder->dev; 487 struct drm_device *dev = encoder->dev;
488 struct nouveau_device *device = nouveau_dev(dev);
488 489
489 /* BIOS scripts usually take care of the backlight, thanks 490 /* BIOS scripts usually take care of the backlight, thanks
490 * Apple for your consistency. 491 * Apple for your consistency.
@@ -492,11 +493,11 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
492 if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 || 493 if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
493 dev->pci_device == 0x0329) { 494 dev->pci_device == 0x0329) {
494 if (mode == DRM_MODE_DPMS_ON) { 495 if (mode == DRM_MODE_DPMS_ON) {
495 nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31); 496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
496 nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1); 497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
497 } else { 498 } else {
498 nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0); 499 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
499 nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0); 500 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
500 } 501 }
501 } 502 }
502#endif 503#endif
@@ -511,7 +512,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
511{ 512{
512 struct drm_device *dev = encoder->dev; 513 struct drm_device *dev = encoder->dev;
513 struct drm_crtc *crtc = encoder->crtc; 514 struct drm_crtc *crtc = encoder->crtc;
514 struct drm_nouveau_private *dev_priv = dev->dev_private; 515 struct nouveau_drm *drm = nouveau_drm(dev);
515 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 516 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
516 bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms); 517 bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);
517 518
@@ -519,7 +520,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
519 return; 520 return;
520 nv_encoder->last_dpms = mode; 521 nv_encoder->last_dpms = mode;
521 522
522 NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n", 523 NV_INFO(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
523 mode, nv_encoder->dcb->index); 524 mode, nv_encoder->dcb->index);
524 525
525 if (was_powersaving && is_powersaving_dpms(mode)) 526 if (was_powersaving && is_powersaving_dpms(mode))
@@ -549,22 +550,22 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
549 if (mode == DRM_MODE_DPMS_ON) 550 if (mode == DRM_MODE_DPMS_ON)
550 nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index); 551 nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
551 else { 552 else {
552 dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); 553 nv04_display(dev)->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
553 dev_priv->mode_reg.sel_clk &= ~0xf0; 554 nv04_display(dev)->mode_reg.sel_clk &= ~0xf0;
554 } 555 }
555 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk); 556 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
556} 557}
557 558
558static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode) 559static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
559{ 560{
560 struct drm_device *dev = encoder->dev; 561 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
561 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 562 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
562 563
563 if (nv_encoder->last_dpms == mode) 564 if (nv_encoder->last_dpms == mode)
564 return; 565 return;
565 nv_encoder->last_dpms = mode; 566 nv_encoder->last_dpms = mode;
566 567
567 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", 568 NV_INFO(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
568 mode, nv_encoder->dcb->index); 569 mode, nv_encoder->dcb->index);
569 570
570 nv04_dfp_update_backlight(encoder, mode); 571 nv04_dfp_update_backlight(encoder, mode);
@@ -585,10 +586,9 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
585{ 586{
586 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 587 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
587 struct drm_device *dev = encoder->dev; 588 struct drm_device *dev = encoder->dev;
588 struct drm_nouveau_private *dev_priv = dev->dev_private;
589 int head = nv_encoder->restore.head; 589 int head = nv_encoder->restore.head;
590 590
591 if (nv_encoder->dcb->type == OUTPUT_LVDS) { 591 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
592 struct nouveau_connector *connector = 592 struct nouveau_connector *connector =
593 nouveau_encoder_connector_get(nv_encoder); 593 nouveau_encoder_connector_get(nv_encoder);
594 594
@@ -597,9 +597,9 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
597 LVDS_PANEL_ON, 597 LVDS_PANEL_ON,
598 connector->native_mode->clock); 598 connector->native_mode->clock);
599 599
600 } else if (nv_encoder->dcb->type == OUTPUT_TMDS) { 600 } else if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
601 int clock = nouveau_hw_pllvals_to_clk 601 int clock = nouveau_hw_pllvals_to_clk
602 (&dev_priv->saved_reg.crtc_reg[head].pllvals); 602 (&nv04_display(dev)->saved_reg.crtc_reg[head].pllvals);
603 603
604 run_tmds_table(dev, nv_encoder->dcb, head, clock); 604 run_tmds_table(dev, nv_encoder->dcb, head, clock);
605 } 605 }
@@ -611,8 +611,6 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
611{ 611{
612 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 612 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
613 613
614 NV_DEBUG_KMS(encoder->dev, "\n");
615
616 if (get_slave_funcs(encoder)) 614 if (get_slave_funcs(encoder))
617 get_slave_funcs(encoder)->destroy(encoder); 615 get_slave_funcs(encoder)->destroy(encoder);
618 616
@@ -623,8 +621,10 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
623static void nv04_tmds_slave_init(struct drm_encoder *encoder) 621static void nv04_tmds_slave_init(struct drm_encoder *encoder)
624{ 622{
625 struct drm_device *dev = encoder->dev; 623 struct drm_device *dev = encoder->dev;
626 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 624 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
627 struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, 2); 625 struct nouveau_drm *drm = nouveau_drm(dev);
626 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
627 struct nouveau_i2c_port *port = i2c->find(i2c, 2);
628 struct i2c_board_info info[] = { 628 struct i2c_board_info info[] = {
629 { 629 {
630 .type = "sil164", 630 .type = "sil164",
@@ -637,16 +637,16 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
637 }; 637 };
638 int type; 638 int type;
639 639
640 if (!nv_gf4_disp_arch(dev) || !i2c || 640 if (!nv_gf4_disp_arch(dev) || !port ||
641 get_tmds_slave(encoder)) 641 get_tmds_slave(encoder))
642 return; 642 return;
643 643
644 type = nouveau_i2c_identify(dev, "TMDS transmitter", info, NULL, 2); 644 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL);
645 if (type < 0) 645 if (type < 0)
646 return; 646 return;
647 647
648 drm_i2c_encoder_init(dev, to_encoder_slave(encoder), 648 drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
649 &i2c->adapter, &info[type]); 649 &port->adapter, &info[type]);
650} 650}
651 651
652static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = { 652static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
@@ -676,7 +676,7 @@ static const struct drm_encoder_funcs nv04_dfp_funcs = {
676}; 676};
677 677
678int 678int
679nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry) 679nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
680{ 680{
681 const struct drm_encoder_helper_funcs *helper; 681 const struct drm_encoder_helper_funcs *helper;
682 struct nouveau_encoder *nv_encoder = NULL; 682 struct nouveau_encoder *nv_encoder = NULL;
@@ -684,11 +684,11 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry)
684 int type; 684 int type;
685 685
686 switch (entry->type) { 686 switch (entry->type) {
687 case OUTPUT_TMDS: 687 case DCB_OUTPUT_TMDS:
688 type = DRM_MODE_ENCODER_TMDS; 688 type = DRM_MODE_ENCODER_TMDS;
689 helper = &nv04_tmds_helper_funcs; 689 helper = &nv04_tmds_helper_funcs;
690 break; 690 break;
691 case OUTPUT_LVDS: 691 case DCB_OUTPUT_LVDS:
692 type = DRM_MODE_ENCODER_LVDS; 692 type = DRM_MODE_ENCODER_LVDS;
693 helper = &nv04_lvds_helper_funcs; 693 helper = &nv04_lvds_helper_funcs;
694 break; 694 break;
@@ -711,7 +711,7 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry)
711 encoder->possible_crtcs = entry->heads; 711 encoder->possible_crtcs = entry->heads;
712 encoder->possible_clones = 0; 712 encoder->possible_clones = 0;
713 713
714 if (entry->type == OUTPUT_TMDS && 714 if (entry->type == DCB_OUTPUT_TMDS &&
715 entry->location != DCB_LOC_ON_CHIP) 715 entry->location != DCB_LOC_ON_CHIP)
716 nv04_tmds_slave_init(encoder); 716 nv04_tmds_slave_init(encoder);
717 717
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 44488e3a257d..b25b8d9c2fcc 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -26,78 +26,15 @@
26#include "drm.h" 26#include "drm.h"
27#include "drm_crtc_helper.h" 27#include "drm_crtc_helper.h"
28 28
29#include "nouveau_drv.h" 29#include "nouveau_drm.h"
30#include "nouveau_fb.h" 30#include "nouveau_reg.h"
31#include "nouveau_hw.h" 31#include "nouveau_hw.h"
32#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
33#include "nouveau_connector.h" 33#include "nouveau_connector.h"
34 34
35static void nv04_vblank_crtc0_isr(struct drm_device *);
36static void nv04_vblank_crtc1_isr(struct drm_device *);
37
38static void
39nv04_display_store_initial_head_owner(struct drm_device *dev)
40{
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42
43 if (dev_priv->chipset != 0x11) {
44 dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
45 return;
46 }
47
48 /* reading CR44 is broken on nv11, so we attempt to infer it */
49 if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)) /* heads tied, restore both */
50 dev_priv->crtc_owner = 0x4;
51 else {
52 uint8_t slaved_on_A, slaved_on_B;
53 bool tvA = false;
54 bool tvB = false;
55
56 slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
57 0x80;
58 if (slaved_on_B)
59 tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
60 MASK(NV_CIO_CRE_LCD_LCD_SELECT));
61
62 slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
63 0x80;
64 if (slaved_on_A)
65 tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
66 MASK(NV_CIO_CRE_LCD_LCD_SELECT));
67
68 if (slaved_on_A && !tvA)
69 dev_priv->crtc_owner = 0x0;
70 else if (slaved_on_B && !tvB)
71 dev_priv->crtc_owner = 0x3;
72 else if (slaved_on_A)
73 dev_priv->crtc_owner = 0x0;
74 else if (slaved_on_B)
75 dev_priv->crtc_owner = 0x3;
76 else
77 dev_priv->crtc_owner = 0x0;
78 }
79}
80
81int 35int
82nv04_display_early_init(struct drm_device *dev) 36nv04_display_early_init(struct drm_device *dev)
83{ 37{
84 /* Make the I2C buses accessible. */
85 if (!nv_gf4_disp_arch(dev)) {
86 uint32_t pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
87
88 if (!(pmc_enable & 1))
89 nv_wr32(dev, NV03_PMC_ENABLE, pmc_enable | 1);
90 }
91
92 /* Unlock the VGA CRTCs. */
93 NVLockVgaCrtcs(dev, false);
94
95 /* Make sure the CRTCs aren't in slaved mode. */
96 if (nv_two_heads(dev)) {
97 nv04_display_store_initial_head_owner(dev);
98 NVSetOwner(dev, 0);
99 }
100
101 /* ensure vblank interrupts are off, they can't be enabled until 38 /* ensure vblank interrupts are off, they can't be enabled until
102 * drm_vblank has been initialised 39 * drm_vblank has been initialised
103 */ 40 */
@@ -111,25 +48,29 @@ nv04_display_early_init(struct drm_device *dev)
111void 48void
112nv04_display_late_takedown(struct drm_device *dev) 49nv04_display_late_takedown(struct drm_device *dev)
113{ 50{
114 struct drm_nouveau_private *dev_priv = dev->dev_private;
115
116 if (nv_two_heads(dev))
117 NVSetOwner(dev, dev_priv->crtc_owner);
118
119 NVLockVgaCrtcs(dev, true);
120} 51}
121 52
122int 53int
123nv04_display_create(struct drm_device *dev) 54nv04_display_create(struct drm_device *dev)
124{ 55{
125 struct drm_nouveau_private *dev_priv = dev->dev_private; 56 struct nouveau_drm *drm = nouveau_drm(dev);
126 struct dcb_table *dcb = &dev_priv->vbios.dcb; 57 struct dcb_table *dcb = &drm->vbios.dcb;
127 struct drm_connector *connector, *ct; 58 struct drm_connector *connector, *ct;
128 struct drm_encoder *encoder; 59 struct drm_encoder *encoder;
129 struct drm_crtc *crtc; 60 struct drm_crtc *crtc;
61 struct nv04_display *disp;
130 int i, ret; 62 int i, ret;
131 63
132 NV_DEBUG_KMS(dev, "\n"); 64 NV_DEBUG(drm, "\n");
65
66 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
67 if (!disp)
68 return -ENOMEM;
69
70 nouveau_display(dev)->priv = disp;
71 nouveau_display(dev)->dtor = nv04_display_destroy;
72 nouveau_display(dev)->init = nv04_display_init;
73 nouveau_display(dev)->fini = nv04_display_fini;
133 74
134 nouveau_hw_save_vga_fonts(dev, 1); 75 nouveau_hw_save_vga_fonts(dev, 1);
135 76
@@ -138,28 +79,28 @@ nv04_display_create(struct drm_device *dev)
138 nv04_crtc_create(dev, 1); 79 nv04_crtc_create(dev, 1);
139 80
140 for (i = 0; i < dcb->entries; i++) { 81 for (i = 0; i < dcb->entries; i++) {
141 struct dcb_entry *dcbent = &dcb->entry[i]; 82 struct dcb_output *dcbent = &dcb->entry[i];
142 83
143 connector = nouveau_connector_create(dev, dcbent->connector); 84 connector = nouveau_connector_create(dev, dcbent->connector);
144 if (IS_ERR(connector)) 85 if (IS_ERR(connector))
145 continue; 86 continue;
146 87
147 switch (dcbent->type) { 88 switch (dcbent->type) {
148 case OUTPUT_ANALOG: 89 case DCB_OUTPUT_ANALOG:
149 ret = nv04_dac_create(connector, dcbent); 90 ret = nv04_dac_create(connector, dcbent);
150 break; 91 break;
151 case OUTPUT_LVDS: 92 case DCB_OUTPUT_LVDS:
152 case OUTPUT_TMDS: 93 case DCB_OUTPUT_TMDS:
153 ret = nv04_dfp_create(connector, dcbent); 94 ret = nv04_dfp_create(connector, dcbent);
154 break; 95 break;
155 case OUTPUT_TV: 96 case DCB_OUTPUT_TV:
156 if (dcbent->location == DCB_LOC_ON_CHIP) 97 if (dcbent->location == DCB_LOC_ON_CHIP)
157 ret = nv17_tv_create(connector, dcbent); 98 ret = nv17_tv_create(connector, dcbent);
158 else 99 else
159 ret = nv04_tv_create(connector, dcbent); 100 ret = nv04_tv_create(connector, dcbent);
160 break; 101 break;
161 default: 102 default:
162 NV_WARN(dev, "DCB type %d not known\n", dcbent->type); 103 NV_WARN(drm, "DCB type %d not known\n", dcbent->type);
163 continue; 104 continue;
164 } 105 }
165 106
@@ -170,7 +111,7 @@ nv04_display_create(struct drm_device *dev)
170 list_for_each_entry_safe(connector, ct, 111 list_for_each_entry_safe(connector, ct,
171 &dev->mode_config.connector_list, head) { 112 &dev->mode_config.connector_list, head) {
172 if (!connector->encoder_ids[0]) { 113 if (!connector->encoder_ids[0]) {
173 NV_WARN(dev, "%s has no encoders, removing\n", 114 NV_WARN(drm, "%s has no encoders, removing\n",
174 drm_get_connector_name(connector)); 115 drm_get_connector_name(connector));
175 connector->funcs->destroy(connector); 116 connector->funcs->destroy(connector);
176 } 117 }
@@ -186,21 +127,18 @@ nv04_display_create(struct drm_device *dev)
186 func->save(encoder); 127 func->save(encoder);
187 } 128 }
188 129
189 nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
190 nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
191 return 0; 130 return 0;
192} 131}
193 132
194void 133void
195nv04_display_destroy(struct drm_device *dev) 134nv04_display_destroy(struct drm_device *dev)
196{ 135{
136 struct nouveau_drm *drm = nouveau_drm(dev);
137 struct nv04_display *disp = nv04_display(dev);
197 struct drm_encoder *encoder; 138 struct drm_encoder *encoder;
198 struct drm_crtc *crtc; 139 struct drm_crtc *crtc;
199 140
200 NV_DEBUG_KMS(dev, "\n"); 141 NV_DEBUG(drm, "\n");
201
202 nouveau_irq_unregister(dev, 24);
203 nouveau_irq_unregister(dev, 25);
204 142
205 /* Turn every CRTC off. */ 143 /* Turn every CRTC off. */
206 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 144 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -222,6 +160,9 @@ nv04_display_destroy(struct drm_device *dev)
222 crtc->funcs->restore(crtc); 160 crtc->funcs->restore(crtc);
223 161
224 nouveau_hw_save_vga_fonts(dev, 0); 162 nouveau_hw_save_vga_fonts(dev, 0);
163
164 nouveau_display(dev)->priv = NULL;
165 kfree(disp);
225} 166}
226 167
227int 168int
@@ -258,17 +199,3 @@ nv04_display_fini(struct drm_device *dev)
258 if (nv_two_heads(dev)) 199 if (nv_two_heads(dev))
259 NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0); 200 NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
260} 201}
261
262static void
263nv04_vblank_crtc0_isr(struct drm_device *dev)
264{
265 nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
266 drm_handle_vblank(dev, 0);
267}
268
269static void
270nv04_vblank_crtc1_isr(struct drm_device *dev)
271{
272 nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
273 drm_handle_vblank(dev, 1);
274}
diff --git a/drivers/gpu/drm/nouveau/nv04_display.h b/drivers/gpu/drm/nouveau/nv04_display.h
new file mode 100644
index 000000000000..45322802e37d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_display.h
@@ -0,0 +1,184 @@
1#ifndef __NV04_DISPLAY_H__
2#define __NV04_DISPLAY_H__
3
4#include <subdev/bios/pll.h>
5
6#include "nouveau_display.h"
7
8enum nv04_fp_display_regs {
9 FP_DISPLAY_END,
10 FP_TOTAL,
11 FP_CRTC,
12 FP_SYNC_START,
13 FP_SYNC_END,
14 FP_VALID_START,
15 FP_VALID_END
16};
17
18struct nv04_crtc_reg {
19 unsigned char MiscOutReg;
20 uint8_t CRTC[0xa0];
21 uint8_t CR58[0x10];
22 uint8_t Sequencer[5];
23 uint8_t Graphics[9];
24 uint8_t Attribute[21];
25 unsigned char DAC[768];
26
27 /* PCRTC regs */
28 uint32_t fb_start;
29 uint32_t crtc_cfg;
30 uint32_t cursor_cfg;
31 uint32_t gpio_ext;
32 uint32_t crtc_830;
33 uint32_t crtc_834;
34 uint32_t crtc_850;
35 uint32_t crtc_eng_ctrl;
36
37 /* PRAMDAC regs */
38 uint32_t nv10_cursync;
39 struct nouveau_pll_vals pllvals;
40 uint32_t ramdac_gen_ctrl;
41 uint32_t ramdac_630;
42 uint32_t ramdac_634;
43 uint32_t tv_setup;
44 uint32_t tv_vtotal;
45 uint32_t tv_vskew;
46 uint32_t tv_vsync_delay;
47 uint32_t tv_htotal;
48 uint32_t tv_hskew;
49 uint32_t tv_hsync_delay;
50 uint32_t tv_hsync_delay2;
51 uint32_t fp_horiz_regs[7];
52 uint32_t fp_vert_regs[7];
53 uint32_t dither;
54 uint32_t fp_control;
55 uint32_t dither_regs[6];
56 uint32_t fp_debug_0;
57 uint32_t fp_debug_1;
58 uint32_t fp_debug_2;
59 uint32_t fp_margin_color;
60 uint32_t ramdac_8c0;
61 uint32_t ramdac_a20;
62 uint32_t ramdac_a24;
63 uint32_t ramdac_a34;
64 uint32_t ctv_regs[38];
65};
66
67struct nv04_output_reg {
68 uint32_t output;
69 int head;
70};
71
72struct nv04_mode_state {
73 struct nv04_crtc_reg crtc_reg[2];
74 uint32_t pllsel;
75 uint32_t sel_clk;
76};
77
78struct nv04_display {
79 struct nv04_mode_state mode_reg;
80 struct nv04_mode_state saved_reg;
81 uint32_t saved_vga_font[4][16384];
82 uint32_t dac_users[4];
83};
84
85static inline struct nv04_display *
86nv04_display(struct drm_device *dev)
87{
88 return nouveau_display(dev)->priv;
89}
90
91/* nv04_display.c */
92int nv04_display_early_init(struct drm_device *);
93void nv04_display_late_takedown(struct drm_device *);
94int nv04_display_create(struct drm_device *);
95void nv04_display_destroy(struct drm_device *);
96int nv04_display_init(struct drm_device *);
97void nv04_display_fini(struct drm_device *);
98
99/* nv04_crtc.c */
100int nv04_crtc_create(struct drm_device *, int index);
101
102/* nv04_dac.c */
103int nv04_dac_create(struct drm_connector *, struct dcb_output *);
104uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
105int nv04_dac_output_offset(struct drm_encoder *encoder);
106void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
107bool nv04_dac_in_use(struct drm_encoder *encoder);
108
109/* nv04_dfp.c */
110int nv04_dfp_create(struct drm_connector *, struct dcb_output *);
111int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent);
112void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent,
113 int head, bool dl);
114void nv04_dfp_disable(struct drm_device *dev, int head);
115void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
116
117/* nv04_tv.c */
118int nv04_tv_identify(struct drm_device *dev, int i2c_index);
119int nv04_tv_create(struct drm_connector *, struct dcb_output *);
120
121/* nv17_tv.c */
122int nv17_tv_create(struct drm_connector *, struct dcb_output *);
123
124static inline bool
125nv_two_heads(struct drm_device *dev)
126{
127 struct nouveau_drm *drm = nouveau_drm(dev);
128 const int impl = dev->pci_device & 0x0ff0;
129
130 if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
131 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
132 return true;
133
134 return false;
135}
136
137static inline bool
138nv_gf4_disp_arch(struct drm_device *dev)
139{
140 return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
141}
142
143static inline bool
144nv_two_reg_pll(struct drm_device *dev)
145{
146 struct nouveau_drm *drm = nouveau_drm(dev);
147 const int impl = dev->pci_device & 0x0ff0;
148
149 if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
150 return true;
151 return false;
152}
153
154static inline bool
155nv_match_device(struct drm_device *dev, unsigned device,
156 unsigned sub_vendor, unsigned sub_device)
157{
158 return dev->pdev->device == device &&
159 dev->pdev->subsystem_vendor == sub_vendor &&
160 dev->pdev->subsystem_device == sub_device;
161}
162
163#include <subdev/bios.h>
164#include <subdev/bios/init.h>
165
166static inline void
167nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
168 struct dcb_output *outp, int crtc)
169{
170 struct nouveau_device *device = nouveau_dev(dev);
171 struct nouveau_bios *bios = nouveau_bios(device);
172 struct nvbios_init init = {
173 .subdev = nv_subdev(bios),
174 .bios = bios,
175 .offset = table,
176 .outp = outp,
177 .crtc = crtc,
178 .execute = 1,
179 };
180
181 nvbios_exec(&init);
182}
183
184#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
deleted file mode 100644
index d5eedd67afe5..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_fb.c
+++ /dev/null
@@ -1,55 +0,0 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv04_fb_vram_init(struct drm_device *dev)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 u32 boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
11
12 if (boot0 & 0x00000100) {
13 dev_priv->vram_size = ((boot0 >> 12) & 0xf) * 2 + 2;
14 dev_priv->vram_size *= 1024 * 1024;
15 } else {
16 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
17 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
18 dev_priv->vram_size = 32 * 1024 * 1024;
19 break;
20 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
21 dev_priv->vram_size = 16 * 1024 * 1024;
22 break;
23 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
24 dev_priv->vram_size = 8 * 1024 * 1024;
25 break;
26 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
27 dev_priv->vram_size = 4 * 1024 * 1024;
28 break;
29 }
30 }
31
32 if ((boot0 & 0x00000038) <= 0x10)
33 dev_priv->vram_type = NV_MEM_TYPE_SGRAM;
34 else
35 dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
36
37 return 0;
38}
39
40int
41nv04_fb_init(struct drm_device *dev)
42{
43 /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
44 * nvidia reading PFB_CFG_0, then writing back its original value.
45 * (which was 0x701114 in this case)
46 */
47
48 nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
49 return 0;
50}
51
52void
53nv04_fb_takedown(struct drm_device *dev)
54{
55}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 7cd7857347ef..77dcc9c50777 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -22,19 +22,18 @@
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/object.h>
26#include "nouveau_drv.h" 26
27#include "nouveau_drm.h"
27#include "nouveau_dma.h" 28#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fbcon.h" 29#include "nouveau_fbcon.h"
30 30
31int 31int
32nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 32nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
33{ 33{
34 struct nouveau_fbdev *nfbdev = info->par; 34 struct nouveau_fbdev *nfbdev = info->par;
35 struct drm_device *dev = nfbdev->dev; 35 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 36 struct nouveau_channel *chan = drm->channel;
37 struct nouveau_channel *chan = dev_priv->channel;
38 int ret; 37 int ret;
39 38
40 ret = RING_SPACE(chan, 4); 39 ret = RING_SPACE(chan, 4);
@@ -53,9 +52,8 @@ int
53nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 52nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
54{ 53{
55 struct nouveau_fbdev *nfbdev = info->par; 54 struct nouveau_fbdev *nfbdev = info->par;
56 struct drm_device *dev = nfbdev->dev; 55 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
57 struct drm_nouveau_private *dev_priv = dev->dev_private; 56 struct nouveau_channel *chan = drm->channel;
58 struct nouveau_channel *chan = dev_priv->channel;
59 int ret; 57 int ret;
60 58
61 ret = RING_SPACE(chan, 7); 59 ret = RING_SPACE(chan, 7);
@@ -81,9 +79,8 @@ int
81nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 79nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
82{ 80{
83 struct nouveau_fbdev *nfbdev = info->par; 81 struct nouveau_fbdev *nfbdev = info->par;
84 struct drm_device *dev = nfbdev->dev; 82 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
85 struct drm_nouveau_private *dev_priv = dev->dev_private; 83 struct nouveau_channel *chan = drm->channel;
86 struct nouveau_channel *chan = dev_priv->channel;
87 uint32_t fg; 84 uint32_t fg;
88 uint32_t bg; 85 uint32_t bg;
89 uint32_t dsize; 86 uint32_t dsize;
@@ -142,9 +139,10 @@ nv04_fbcon_accel_init(struct fb_info *info)
142{ 139{
143 struct nouveau_fbdev *nfbdev = info->par; 140 struct nouveau_fbdev *nfbdev = info->par;
144 struct drm_device *dev = nfbdev->dev; 141 struct drm_device *dev = nfbdev->dev;
145 struct drm_nouveau_private *dev_priv = dev->dev_private; 142 struct nouveau_drm *drm = nouveau_drm(dev);
146 struct nouveau_channel *chan = dev_priv->channel; 143 struct nouveau_channel *chan = drm->channel;
147 const int sub = NvSubCtxSurf2D; 144 struct nouveau_device *device = nv_device(drm->device);
145 struct nouveau_object *object;
148 int surface_fmt, pattern_fmt, rect_fmt; 146 int surface_fmt, pattern_fmt, rect_fmt;
149 int ret; 147 int ret;
150 148
@@ -176,31 +174,35 @@ nv04_fbcon_accel_init(struct fb_info *info)
176 return -EINVAL; 174 return -EINVAL;
177 } 175 }
178 176
179 ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D, 177 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D,
180 dev_priv->card_type >= NV_10 ? 178 device->card_type >= NV_10 ? 0x0062 : 0x0042,
181 0x0062 : 0x0042); 179 NULL, 0, &object);
182 if (ret) 180 if (ret)
183 return ret; 181 return ret;
184 182
185 ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019); 183 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect,
184 0x0019, NULL, 0, &object);
186 if (ret) 185 if (ret)
187 return ret; 186 return ret;
188 187
189 ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043); 188 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop,
189 0x0043, NULL, 0, &object);
190 if (ret) 190 if (ret)
191 return ret; 191 return ret;
192 192
193 ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044); 193 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt,
194 0x0044, NULL, 0, &object);
194 if (ret) 195 if (ret)
195 return ret; 196 return ret;
196 197
197 ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a); 198 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect,
199 0x004a, NULL, 0, &object);
198 if (ret) 200 if (ret)
199 return ret; 201 return ret;
200 202
201 ret = nouveau_gpuobj_gr_new(chan, NvImageBlit, 203 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit,
202 dev_priv->chipset >= 0x11 ? 204 device->chipset >= 0x11 ? 0x009f : 0x005f,
203 0x009f : 0x005f); 205 NULL, 0, &object);
204 if (ret) 206 if (ret)
205 return ret; 207 return ret;
206 208
@@ -209,25 +211,25 @@ nv04_fbcon_accel_init(struct fb_info *info)
209 return 0; 211 return 0;
210 } 212 }
211 213
212 BEGIN_NV04(chan, sub, 0x0000, 1); 214 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
213 OUT_RING(chan, NvCtxSurf2D); 215 OUT_RING(chan, NvCtxSurf2D);
214 BEGIN_NV04(chan, sub, 0x0184, 2); 216 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
215 OUT_RING(chan, NvDmaFB); 217 OUT_RING(chan, NvDmaFB);
216 OUT_RING(chan, NvDmaFB); 218 OUT_RING(chan, NvDmaFB);
217 BEGIN_NV04(chan, sub, 0x0300, 4); 219 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
218 OUT_RING(chan, surface_fmt); 220 OUT_RING(chan, surface_fmt);
219 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); 221 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
220 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 222 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
221 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 223 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
222 224
223 BEGIN_NV04(chan, sub, 0x0000, 1); 225 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
224 OUT_RING(chan, NvRop); 226 OUT_RING(chan, NvRop);
225 BEGIN_NV04(chan, sub, 0x0300, 1); 227 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
226 OUT_RING(chan, 0x55); 228 OUT_RING(chan, 0x55);
227 229
228 BEGIN_NV04(chan, sub, 0x0000, 1); 230 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
229 OUT_RING(chan, NvImagePatt); 231 OUT_RING(chan, NvImagePatt);
230 BEGIN_NV04(chan, sub, 0x0300, 8); 232 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
231 OUT_RING(chan, pattern_fmt); 233 OUT_RING(chan, pattern_fmt);
232#ifdef __BIG_ENDIAN 234#ifdef __BIG_ENDIAN
233 OUT_RING(chan, 2); 235 OUT_RING(chan, 2);
@@ -241,9 +243,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
241 OUT_RING(chan, ~0); 243 OUT_RING(chan, ~0);
242 OUT_RING(chan, ~0); 244 OUT_RING(chan, ~0);
243 245
244 BEGIN_NV04(chan, sub, 0x0000, 1); 246 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
245 OUT_RING(chan, NvClipRect); 247 OUT_RING(chan, NvClipRect);
246 BEGIN_NV04(chan, sub, 0x0300, 2); 248 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
247 OUT_RING(chan, 0); 249 OUT_RING(chan, 0);
248 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); 250 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
249 251
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index abe89db6de24..a220b94ba9f2 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -22,15 +22,14 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <engine/fifo.h>
26#include "nouveau_drv.h" 26
27#include "nouveau_drm.h"
27#include "nouveau_dma.h" 28#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fence.h" 29#include "nouveau_fence.h"
30 30
31struct nv04_fence_chan { 31struct nv04_fence_chan {
32 struct nouveau_fence_chan base; 32 struct nouveau_fence_chan base;
33 atomic_t sequence;
34}; 33};
35 34
36struct nv04_fence_priv { 35struct nv04_fence_priv {
@@ -57,84 +56,56 @@ nv04_fence_sync(struct nouveau_fence *fence,
57 return -ENODEV; 56 return -ENODEV;
58} 57}
59 58
60int
61nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
62{
63 struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
64 atomic_set(&fctx->sequence, data);
65 return 0;
66}
67
68static u32 59static u32
69nv04_fence_read(struct nouveau_channel *chan) 60nv04_fence_read(struct nouveau_channel *chan)
70{ 61{
71 struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE]; 62 struct nouveau_fifo_chan *fifo = (void *)chan->object;
72 return atomic_read(&fctx->sequence); 63 return atomic_read(&fifo->refcnt);
73} 64}
74 65
75static void 66static void
76nv04_fence_context_del(struct nouveau_channel *chan, int engine) 67nv04_fence_context_del(struct nouveau_channel *chan)
77{ 68{
78 struct nv04_fence_chan *fctx = chan->engctx[engine]; 69 struct nv04_fence_chan *fctx = chan->fence;
79 nouveau_fence_context_del(&fctx->base); 70 nouveau_fence_context_del(&fctx->base);
80 chan->engctx[engine] = NULL; 71 chan->fence = NULL;
81 kfree(fctx); 72 kfree(fctx);
82} 73}
83 74
84static int 75static int
85nv04_fence_context_new(struct nouveau_channel *chan, int engine) 76nv04_fence_context_new(struct nouveau_channel *chan)
86{ 77{
87 struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL); 78 struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
88 if (fctx) { 79 if (fctx) {
89 nouveau_fence_context_new(&fctx->base); 80 nouveau_fence_context_new(&fctx->base);
90 atomic_set(&fctx->sequence, 0); 81 chan->fence = fctx;
91 chan->engctx[engine] = fctx;
92 return 0; 82 return 0;
93 } 83 }
94 return -ENOMEM; 84 return -ENOMEM;
95} 85}
96 86
97static int
98nv04_fence_fini(struct drm_device *dev, int engine, bool suspend)
99{
100 return 0;
101}
102
103static int
104nv04_fence_init(struct drm_device *dev, int engine)
105{
106 return 0;
107}
108
109static void 87static void
110nv04_fence_destroy(struct drm_device *dev, int engine) 88nv04_fence_destroy(struct nouveau_drm *drm)
111{ 89{
112 struct drm_nouveau_private *dev_priv = dev->dev_private; 90 struct nv04_fence_priv *priv = drm->fence;
113 struct nv04_fence_priv *priv = nv_engine(dev, engine); 91 drm->fence = NULL;
114
115 dev_priv->eng[engine] = NULL;
116 kfree(priv); 92 kfree(priv);
117} 93}
118 94
119int 95int
120nv04_fence_create(struct drm_device *dev) 96nv04_fence_create(struct nouveau_drm *drm)
121{ 97{
122 struct drm_nouveau_private *dev_priv = dev->dev_private;
123 struct nv04_fence_priv *priv; 98 struct nv04_fence_priv *priv;
124 int ret;
125 99
126 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 100 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
127 if (!priv) 101 if (!priv)
128 return -ENOMEM; 102 return -ENOMEM;
129 103
130 priv->base.engine.destroy = nv04_fence_destroy; 104 priv->base.dtor = nv04_fence_destroy;
131 priv->base.engine.init = nv04_fence_init; 105 priv->base.context_new = nv04_fence_context_new;
132 priv->base.engine.fini = nv04_fence_fini; 106 priv->base.context_del = nv04_fence_context_del;
133 priv->base.engine.context_new = nv04_fence_context_new;
134 priv->base.engine.context_del = nv04_fence_context_del;
135 priv->base.emit = nv04_fence_emit; 107 priv->base.emit = nv04_fence_emit;
136 priv->base.sync = nv04_fence_sync; 108 priv->base.sync = nv04_fence_sync;
137 priv->base.read = nv04_fence_read; 109 priv->base.read = nv04_fence_read;
138 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine; 110 return 0;
139 return ret;
140} 111}
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
deleted file mode 100644
index a6295cd00ec7..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ /dev/null
@@ -1,506 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_util.h"
32#include "nouveau_ramht.h"
33#include "nouveau_software.h"
34
35static struct ramfc_desc {
36 unsigned bits:6;
37 unsigned ctxs:5;
38 unsigned ctxp:8;
39 unsigned regs:5;
40 unsigned regp;
41} nv04_ramfc[] = {
42 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
43 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
44 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
45 { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
46 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
47 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
48 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
49 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
50 {}
51};
52
53struct nv04_fifo_priv {
54 struct nouveau_fifo_priv base;
55 struct ramfc_desc *ramfc_desc;
56};
57
58struct nv04_fifo_chan {
59 struct nouveau_fifo_chan base;
60 struct nouveau_gpuobj *ramfc;
61};
62
63bool
64nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
65{
66 int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
67
68 if (!enable) {
69 /* In some cases the PFIFO puller may be left in an
70 * inconsistent state if you try to stop it when it's
71 * busy translating handles. Sometimes you get a
72 * PFIFO_CACHE_ERROR, sometimes it just fails silently
73 * sending incorrect instance offsets to PGRAPH after
74 * it's started up again. To avoid the latter we
75 * invalidate the most recently calculated instance.
76 */
77 if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
78 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
79 NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
80
81 if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
82 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
83 nv_wr32(dev, NV03_PFIFO_INTR_0,
84 NV_PFIFO_INTR_CACHE_ERROR);
85
86 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
87 }
88
89 return pull & 1;
90}
91
92static int
93nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
94{
95 struct drm_device *dev = chan->dev;
96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
98 struct nv04_fifo_chan *fctx;
99 unsigned long flags;
100 int ret;
101
102 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
103 if (!fctx)
104 return -ENOMEM;
105
106 /* map channel control registers */
107 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
108 NV03_USER(chan->id), PAGE_SIZE);
109 if (!chan->user) {
110 ret = -ENOMEM;
111 goto error;
112 }
113
114 /* initialise default fifo context */
115 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
116 chan->id * 32, ~0, 32,
117 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
118 if (ret)
119 goto error;
120
121 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
122 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
123 nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
124 nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
125 nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
126 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
127#ifdef __BIG_ENDIAN
128 NV_PFIFO_CACHE1_BIG_ENDIAN |
129#endif
130 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
131 nv_wo32(fctx->ramfc, 0x14, 0x00000000);
132 nv_wo32(fctx->ramfc, 0x18, 0x00000000);
133 nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
134
135 /* enable dma mode on the channel */
136 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
137 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
138 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
139
140error:
141 if (ret)
142 priv->base.base.context_del(chan, engine);
143 return ret;
144}
145
146void
147nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
148{
149 struct drm_device *dev = chan->dev;
150 struct drm_nouveau_private *dev_priv = dev->dev_private;
151 struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
152 struct nv04_fifo_chan *fctx = chan->engctx[engine];
153 struct ramfc_desc *c = priv->ramfc_desc;
154 unsigned long flags;
155 int chid;
156
157 /* prevent fifo context switches */
158 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
159 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
160
161 /* if this channel is active, replace it with a null context */
162 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
163 if (chid == chan->id) {
164 nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
165 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
166 nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
167
168 do {
169 u32 mask = ((1ULL << c->bits) - 1) << c->regs;
170 nv_mask(dev, c->regp, mask, 0x00000000);
171 } while ((++c)->bits);
172
173 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
174 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
175 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
176 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
177 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
178 }
179
180 /* restore normal operation, after disabling dma mode */
181 nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
182 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
183 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
184
185 /* clean up */
186 nouveau_gpuobj_ref(NULL, &fctx->ramfc);
187 nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
188 if (chan->user) {
189 iounmap(chan->user);
190 chan->user = NULL;
191 }
192}
193
194int
195nv04_fifo_init(struct drm_device *dev, int engine)
196{
197 struct drm_nouveau_private *dev_priv = dev->dev_private;
198 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
199 int i;
200
201 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
202 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
203
204 nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
205 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
206
207 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
208 ((dev_priv->ramht->bits - 9) << 16) |
209 (dev_priv->ramht->gpuobj->pinst >> 8));
210 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
211 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
212
213 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
214
215 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
216 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
217
218 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
219 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
220 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
221
222 for (i = 0; i < priv->base.channels; i++) {
223 if (dev_priv->channels.ptr[i])
224 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
225 }
226
227 return 0;
228}
229
230int
231nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
232{
233 struct drm_nouveau_private *dev_priv = dev->dev_private;
234 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
235 struct nouveau_channel *chan;
236 int chid;
237
238 /* prevent context switches and halt fifo operation */
239 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
240 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
241 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
242 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
243
244 /* store current fifo context in ramfc */
245 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
246 chan = dev_priv->channels.ptr[chid];
247 if (suspend && chid != priv->base.channels && chan) {
248 struct nv04_fifo_chan *fctx = chan->engctx[engine];
249 struct nouveau_gpuobj *ctx = fctx->ramfc;
250 struct ramfc_desc *c = priv->ramfc_desc;
251 do {
252 u32 rm = ((1ULL << c->bits) - 1) << c->regs;
253 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
254 u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
255 u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
256 nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
257 } while ((++c)->bits);
258 }
259
260 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
261 return 0;
262}
263
/* Attempt to execute a method that faulted in hardware as a software
 * method.  Returns true if the method was consumed in software, false
 * if it must be reported as a real CACHE_ERROR.
 *
 * addr encodes the subchannel in bits 15:13 and the method in bits 12:2.
 */
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	struct nouveau_gpuobj *obj;
	unsigned long flags;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	u32 engine;

	/* channel table is protected against teardown by this lock */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (likely(chid >= 0 && chid < pfifo->channels))
		chan = dev_priv->channels.ptr[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000: /* bind object to subchannel */
		obj = nouveau_ramht_find(chan, data);
		/* only software objects may be bound in software */
		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
			break;

		/* mark the subchannel's engine as SW (0) in CACHE1_ENGINE */
		engine = 0x0000000f << (subc * 4);

		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
		handled = true;
		break;
	default:
		/* non-bind methods are only handled here if the subchannel
		 * currently points at the software engine */
		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
					      mthd, data))
			handled = true;
		break;
	}

out:
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return handled;
}
309
310static const char *nv_dma_state_err(u32 state)
311{
312 static const char * const desc[] = {
313 "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
314 "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
315 };
316 return desc[(state >> 29) & 0x7];
317}
318
/* PFIFO interrupt handler for NV04-style FIFOs (also used by later
 * chipsets up to and including NV50-era hardware, as the NV50-specific
 * branches below show).  Services CACHE_ERROR, DMA_PUSHER and SEMAPHORE
 * interrupts, spinning up to 100 times before giving up and masking
 * PFIFO interrupts entirely.
 */
void
nv04_fifo_isr(struct drm_device *dev)
{
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status, reassign;
	int cnt = 0;

	/* remember whether context switching was enabled, so it can be
	 * restored after each serviced interrupt */
	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		/* disable context switches while we poke CACHE1 */
		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			/* method/data register layout moved on NV40+ */
			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			/* try to execute the faulting method in software
			 * before reporting it as an error */
			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			/* ack the interrupt, then step GET past the bad
			 * method (pusher off, pull off via PUSH0, bump GET,
			 * re-enable) so the FIFO can make progress */
			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
						NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			/* raw offsets: 0x3244 DMA_GET, 0x3240 DMA_PUT,
			 * 0x3220 DMA_PUSH, 0x3228 DMA_STATE */
			u32 dma_get = nv_rd32(dev, 0x003244);
			u32 dma_put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				/* NV50 has 40-bit get/put (high bytes at
				 * 0x3328/0x3320) plus an indirect buffer */
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				if (nouveau_ratelimit())
					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
					     "State 0x%08x (err: %s) Push 0x%08x\n",
						chid, ho_get, dma_get, ho_put,
						dma_put, ib_get, ib_put, state,
						nv_dma_state_err(state),
						push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				/* skip the offending commands: snap GET
				 * forward to PUT at whichever level stalled */
				if (dma_get != dma_put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, dma_put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
					chid, dma_get, dma_put, state,
					nv_dma_state_err(state), push);

				if (dma_get != dma_put)
					nv_wr32(dev, 0x003244, dma_put);
			}

			/* clear DMA_STATE, re-enable the pusher, ack intr */
			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			/* release the semaphore and step past the method */
			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			/* bit 4 on NV50: VM fault reported through PFIFO */
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, nouveau_ratelimit());
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		/* anything left is unknown; log it and ack blindly */
		if (status) {
			if (nouveau_ratelimit())
				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
					status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	/* interrupt storm: mask PFIFO interrupts (0x2140) and the PMC
	 * master interrupt enable (0x140) to stop the flood */
	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	/* ack the PFIFO pending bit at PMC level */
	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}
472
473void
474nv04_fifo_destroy(struct drm_device *dev, int engine)
475{
476 struct drm_nouveau_private *dev_priv = dev->dev_private;
477 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
478
479 nouveau_irq_unregister(dev, 8);
480
481 dev_priv->eng[engine] = NULL;
482 kfree(priv);
483}
484
485int
486nv04_fifo_create(struct drm_device *dev)
487{
488 struct drm_nouveau_private *dev_priv = dev->dev_private;
489 struct nv04_fifo_priv *priv;
490
491 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
492 if (!priv)
493 return -ENOMEM;
494
495 priv->base.base.destroy = nv04_fifo_destroy;
496 priv->base.base.init = nv04_fifo_init;
497 priv->base.base.fini = nv04_fifo_fini;
498 priv->base.base.context_new = nv04_fifo_context_new;
499 priv->base.base.context_del = nv04_fifo_context_del;
500 priv->base.channels = 15;
501 priv->ramfc_desc = nv04_ramfc;
502 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
503
504 nouveau_irq_register(dev, 8, nv04_fifo_isr);
505 return 0;
506}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
deleted file mode 100644
index 72f1a62903b3..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ /dev/null
@@ -1,1326 +0,0 @@
1/*
2 * Copyright 2007 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drm.h"
28#include "nouveau_drv.h"
29#include "nouveau_hw.h"
30#include "nouveau_util.h"
31#include "nouveau_ramht.h"
32
/* NV04/NV05 PGRAPH engine instance; currently just wraps the generic
 * exec-engine base with no extra state. */
struct nv04_graph_engine {
	struct nouveau_exec_engine base;
};
36
/* The set of PGRAPH MMIO registers that make up an NV04 channel context.
 * There is no hardware context switching on NV04, so on a channel switch
 * every register listed here is saved to / restored from the channel's
 * software context (struct graph_state), one slot per entry, in this
 * exact order.  Entries without symbolic names are registers the
 * register headers don't (yet) name. */
static uint32_t nv04_graph_ctx_regs[] = {
	0x0040053c,
	0x00400544,
	0x00400540,
	0x00400548,
	NV04_PGRAPH_CTX_SWITCH1,
	NV04_PGRAPH_CTX_SWITCH2,
	NV04_PGRAPH_CTX_SWITCH3,
	NV04_PGRAPH_CTX_SWITCH4,
	NV04_PGRAPH_CTX_CACHE1,
	NV04_PGRAPH_CTX_CACHE2,
	NV04_PGRAPH_CTX_CACHE3,
	NV04_PGRAPH_CTX_CACHE4,
	0x00400184,
	0x004001a4,
	0x004001c4,
	0x004001e4,
	0x00400188,
	0x004001a8,
	0x004001c8,
	0x004001e8,
	0x0040018c,
	0x004001ac,
	0x004001cc,
	0x004001ec,
	0x00400190,
	0x004001b0,
	0x004001d0,
	0x004001f0,
	0x00400194,
	0x004001b4,
	0x004001d4,
	0x004001f4,
	0x00400198,
	0x004001b8,
	0x004001d8,
	0x004001f8,
	0x0040019c,
	0x004001bc,
	0x004001dc,
	0x004001fc,
	0x00400174,
	NV04_PGRAPH_DMA_START_0,
	NV04_PGRAPH_DMA_START_1,
	NV04_PGRAPH_DMA_LENGTH,
	NV04_PGRAPH_DMA_MISC,
	NV04_PGRAPH_DMA_PITCH,
	NV04_PGRAPH_BOFFSET0,
	NV04_PGRAPH_BBASE0,
	NV04_PGRAPH_BLIMIT0,
	NV04_PGRAPH_BOFFSET1,
	NV04_PGRAPH_BBASE1,
	NV04_PGRAPH_BLIMIT1,
	NV04_PGRAPH_BOFFSET2,
	NV04_PGRAPH_BBASE2,
	NV04_PGRAPH_BLIMIT2,
	NV04_PGRAPH_BOFFSET3,
	NV04_PGRAPH_BBASE3,
	NV04_PGRAPH_BLIMIT3,
	NV04_PGRAPH_BOFFSET4,
	NV04_PGRAPH_BBASE4,
	NV04_PGRAPH_BLIMIT4,
	NV04_PGRAPH_BOFFSET5,
	NV04_PGRAPH_BBASE5,
	NV04_PGRAPH_BLIMIT5,
	NV04_PGRAPH_BPITCH0,
	NV04_PGRAPH_BPITCH1,
	NV04_PGRAPH_BPITCH2,
	NV04_PGRAPH_BPITCH3,
	NV04_PGRAPH_BPITCH4,
	NV04_PGRAPH_SURFACE,
	NV04_PGRAPH_STATE,
	NV04_PGRAPH_BSWIZZLE2,
	NV04_PGRAPH_BSWIZZLE5,
	NV04_PGRAPH_BPIXEL,
	NV04_PGRAPH_NOTIFY,
	NV04_PGRAPH_PATT_COLOR0,
	NV04_PGRAPH_PATT_COLOR1,
	NV04_PGRAPH_PATT_COLORRAM+0x00,
	NV04_PGRAPH_PATT_COLORRAM+0x04,
	NV04_PGRAPH_PATT_COLORRAM+0x08,
	NV04_PGRAPH_PATT_COLORRAM+0x0c,
	NV04_PGRAPH_PATT_COLORRAM+0x10,
	NV04_PGRAPH_PATT_COLORRAM+0x14,
	NV04_PGRAPH_PATT_COLORRAM+0x18,
	NV04_PGRAPH_PATT_COLORRAM+0x1c,
	NV04_PGRAPH_PATT_COLORRAM+0x20,
	NV04_PGRAPH_PATT_COLORRAM+0x24,
	NV04_PGRAPH_PATT_COLORRAM+0x28,
	NV04_PGRAPH_PATT_COLORRAM+0x2c,
	NV04_PGRAPH_PATT_COLORRAM+0x30,
	NV04_PGRAPH_PATT_COLORRAM+0x34,
	NV04_PGRAPH_PATT_COLORRAM+0x38,
	NV04_PGRAPH_PATT_COLORRAM+0x3c,
	NV04_PGRAPH_PATT_COLORRAM+0x40,
	NV04_PGRAPH_PATT_COLORRAM+0x44,
	NV04_PGRAPH_PATT_COLORRAM+0x48,
	NV04_PGRAPH_PATT_COLORRAM+0x4c,
	NV04_PGRAPH_PATT_COLORRAM+0x50,
	NV04_PGRAPH_PATT_COLORRAM+0x54,
	NV04_PGRAPH_PATT_COLORRAM+0x58,
	NV04_PGRAPH_PATT_COLORRAM+0x5c,
	NV04_PGRAPH_PATT_COLORRAM+0x60,
	NV04_PGRAPH_PATT_COLORRAM+0x64,
	NV04_PGRAPH_PATT_COLORRAM+0x68,
	NV04_PGRAPH_PATT_COLORRAM+0x6c,
	NV04_PGRAPH_PATT_COLORRAM+0x70,
	NV04_PGRAPH_PATT_COLORRAM+0x74,
	NV04_PGRAPH_PATT_COLORRAM+0x78,
	NV04_PGRAPH_PATT_COLORRAM+0x7c,
	NV04_PGRAPH_PATT_COLORRAM+0x80,
	NV04_PGRAPH_PATT_COLORRAM+0x84,
	NV04_PGRAPH_PATT_COLORRAM+0x88,
	NV04_PGRAPH_PATT_COLORRAM+0x8c,
	NV04_PGRAPH_PATT_COLORRAM+0x90,
	NV04_PGRAPH_PATT_COLORRAM+0x94,
	NV04_PGRAPH_PATT_COLORRAM+0x98,
	NV04_PGRAPH_PATT_COLORRAM+0x9c,
	NV04_PGRAPH_PATT_COLORRAM+0xa0,
	NV04_PGRAPH_PATT_COLORRAM+0xa4,
	NV04_PGRAPH_PATT_COLORRAM+0xa8,
	NV04_PGRAPH_PATT_COLORRAM+0xac,
	NV04_PGRAPH_PATT_COLORRAM+0xb0,
	NV04_PGRAPH_PATT_COLORRAM+0xb4,
	NV04_PGRAPH_PATT_COLORRAM+0xb8,
	NV04_PGRAPH_PATT_COLORRAM+0xbc,
	NV04_PGRAPH_PATT_COLORRAM+0xc0,
	NV04_PGRAPH_PATT_COLORRAM+0xc4,
	NV04_PGRAPH_PATT_COLORRAM+0xc8,
	NV04_PGRAPH_PATT_COLORRAM+0xcc,
	NV04_PGRAPH_PATT_COLORRAM+0xd0,
	NV04_PGRAPH_PATT_COLORRAM+0xd4,
	NV04_PGRAPH_PATT_COLORRAM+0xd8,
	NV04_PGRAPH_PATT_COLORRAM+0xdc,
	NV04_PGRAPH_PATT_COLORRAM+0xe0,
	NV04_PGRAPH_PATT_COLORRAM+0xe4,
	NV04_PGRAPH_PATT_COLORRAM+0xe8,
	NV04_PGRAPH_PATT_COLORRAM+0xec,
	NV04_PGRAPH_PATT_COLORRAM+0xf0,
	NV04_PGRAPH_PATT_COLORRAM+0xf4,
	NV04_PGRAPH_PATT_COLORRAM+0xf8,
	NV04_PGRAPH_PATT_COLORRAM+0xfc,
	NV04_PGRAPH_PATTERN,
	0x0040080c,
	NV04_PGRAPH_PATTERN_SHAPE,
	0x00400600,
	NV04_PGRAPH_ROP3,
	NV04_PGRAPH_CHROMA,
	NV04_PGRAPH_BETA_AND,
	NV04_PGRAPH_BETA_PREMULT,
	NV04_PGRAPH_CONTROL0,
	NV04_PGRAPH_CONTROL1,
	NV04_PGRAPH_CONTROL2,
	NV04_PGRAPH_BLEND,
	NV04_PGRAPH_STORED_FMT,
	NV04_PGRAPH_SOURCE_COLOR,
	0x00400560,
	0x00400568,
	0x00400564,
	0x0040056c,
	0x00400400,
	0x00400480,
	0x00400404,
	0x00400484,
	0x00400408,
	0x00400488,
	0x0040040c,
	0x0040048c,
	0x00400410,
	0x00400490,
	0x00400414,
	0x00400494,
	0x00400418,
	0x00400498,
	0x0040041c,
	0x0040049c,
	0x00400420,
	0x004004a0,
	0x00400424,
	0x004004a4,
	0x00400428,
	0x004004a8,
	0x0040042c,
	0x004004ac,
	0x00400430,
	0x004004b0,
	0x00400434,
	0x004004b4,
	0x00400438,
	0x004004b8,
	0x0040043c,
	0x004004bc,
	0x00400440,
	0x004004c0,
	0x00400444,
	0x004004c4,
	0x00400448,
	0x004004c8,
	0x0040044c,
	0x004004cc,
	0x00400450,
	0x004004d0,
	0x00400454,
	0x004004d4,
	0x00400458,
	0x004004d8,
	0x0040045c,
	0x004004dc,
	0x00400460,
	0x004004e0,
	0x00400464,
	0x004004e4,
	0x00400468,
	0x004004e8,
	0x0040046c,
	0x004004ec,
	0x00400470,
	0x004004f0,
	0x00400474,
	0x004004f4,
	0x00400478,
	0x004004f8,
	0x0040047c,
	0x004004fc,
	0x00400534,
	0x00400538,
	0x00400514,
	0x00400518,
	0x0040051c,
	0x00400520,
	0x00400524,
	0x00400528,
	0x0040052c,
	0x00400530,
	0x00400d00,
	0x00400d40,
	0x00400d80,
	0x00400d04,
	0x00400d44,
	0x00400d84,
	0x00400d08,
	0x00400d48,
	0x00400d88,
	0x00400d0c,
	0x00400d4c,
	0x00400d8c,
	0x00400d10,
	0x00400d50,
	0x00400d90,
	0x00400d14,
	0x00400d54,
	0x00400d94,
	0x00400d18,
	0x00400d58,
	0x00400d98,
	0x00400d1c,
	0x00400d5c,
	0x00400d9c,
	0x00400d20,
	0x00400d60,
	0x00400da0,
	0x00400d24,
	0x00400d64,
	0x00400da4,
	0x00400d28,
	0x00400d68,
	0x00400da8,
	0x00400d2c,
	0x00400d6c,
	0x00400dac,
	0x00400d30,
	0x00400d70,
	0x00400db0,
	0x00400d34,
	0x00400d74,
	0x00400db4,
	0x00400d38,
	0x00400d78,
	0x00400db8,
	0x00400d3c,
	0x00400d7c,
	0x00400dbc,
	0x00400590,
	0x00400594,
	0x00400598,
	0x0040059c,
	0x004005a8,
	0x004005ac,
	0x004005b0,
	0x004005b4,
	0x004005c0,
	0x004005c4,
	0x004005c8,
	0x004005cc,
	0x004005d0,
	0x004005d4,
	0x004005d8,
	0x004005dc,
	0x004005e0,
	NV04_PGRAPH_PASSTHRU_0,
	NV04_PGRAPH_PASSTHRU_1,
	NV04_PGRAPH_PASSTHRU_2,
	NV04_PGRAPH_DVD_COLORFMT,
	NV04_PGRAPH_SCALED_FORMAT,
	NV04_PGRAPH_MISC24_0,
	NV04_PGRAPH_MISC24_1,
	NV04_PGRAPH_MISC24_2,
	0x00400500,
	0x00400504,
	NV04_PGRAPH_VALID1,
	NV04_PGRAPH_VALID2,
	NV04_PGRAPH_DEBUG_3
};
350
/* Per-channel software copy of the PGRAPH context.  nv04[i] holds the
 * saved value of register nv04_graph_ctx_regs[i]. */
struct graph_state {
	uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
};
354
355static struct nouveau_channel *
356nv04_graph_channel(struct drm_device *dev)
357{
358 struct drm_nouveau_private *dev_priv = dev->dev_private;
359 int chid = 15;
360
361 if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
362 chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
363
364 if (chid > 15)
365 return NULL;
366
367 return dev_priv->channels.ptr[chid];
368}
369
370static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
371{
372 int i;
373
374 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
375 if (nv04_graph_ctx_regs[i] == reg)
376 return &ctx->nv04[i];
377 }
378
379 return NULL;
380}
381
/* Load a channel's saved PGRAPH context into the hardware and mark the
 * channel as resident.  Always returns 0.
 */
static int
nv04_graph_load_context(struct nouveau_channel *chan)
{
	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	uint32_t tmp;
	int i;

	/* restore every context register from the software copy */
	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
		nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);

	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);

	/* channel id lives in the top byte of CTX_USER */
	tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);

	/* clear the upper bits of FFINTFC_ST2 */
	tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
	nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);

	return 0;
}
403
/* Save the currently-resident channel's PGRAPH state into its software
 * context and mark the hardware as holding no channel (id 15).
 * Returns 0 whether or not a channel was resident.
 */
static int
nv04_graph_unload_context(struct drm_device *dev)
{
	struct nouveau_channel *chan = NULL;
	struct graph_state *ctx;
	uint32_t tmp;
	int i;

	chan = nv04_graph_channel(dev);
	if (!chan)
		return 0;
	ctx = chan->engctx[NVOBJ_ENGINE_GR];

	/* snapshot every context register into the software copy */
	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
		ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);

	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
	/* park CTX_USER on the invalid channel id (15) */
	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= 15 << 24;
	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
	return 0;
}
426
427static int
428nv04_graph_context_new(struct nouveau_channel *chan, int engine)
429{
430 struct graph_state *pgraph_ctx;
431 NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
432
433 pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
434 if (pgraph_ctx == NULL)
435 return -ENOMEM;
436
437 *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
438
439 chan->engctx[engine] = pgraph_ctx;
440 return 0;
441}
442
/* Destroy a channel's PGRAPH context.  If the channel is currently
 * resident in hardware its state is unloaded first, with PGRAPH fetch
 * disabled and the context-switch lock held to keep the ISR out.
 */
static void
nv04_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx = chan->engctx[engine];
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	/* halt PGRAPH command fetch while we fiddle with the context */
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);

	/* Unload the context if it's the currently active one */
	if (nv04_graph_channel(dev) == chan)
		nv04_graph_unload_context(dev);

	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the context resources */
	kfree(pgraph_ctx);
	chan->engctx[engine] = NULL;
}
465
/* Create a 16-byte PGRAPH object (grobj) of the given class for a
 * channel and insert it into the channel's RAMHT under 'handle'.
 * Returns 0 on success or a negative errno.
 */
int
nv04_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;
	obj->class  = class;

	/* word 0 holds the class; big-endian instmem needs the
	 * endianness flag (bit 19) set as well */
#ifdef __BIG_ENDIAN
	nv_wo32(obj, 0x00, 0x00080000 | class);
#else
	nv_wo32(obj, 0x00, class);
#endif
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);

	/* RAMHT holds its own reference; drop ours either way */
	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
493
/* Bring up PGRAPH: reset the unit via PMC, enable interrupts, and
 * program the DEBUG registers with values matching the proprietary
 * driver (see inline notes comparing against blob/haiku values).
 * Always returns 0.
 */
static int
nv04_graph_init(struct drm_device *dev, int engine)
{
	uint32_t tmp;

	/* pulse the PGRAPH enable bit in PMC to reset the unit */
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

	/* Enable PGRAPH interrupts */
	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
	nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
	/*1231C000 blob, 001 haiku*/
	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
	/*0x72111100 blob , 01 haiku*/
	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
	/*haiku same*/

	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
	/*haiku and blob 10d4*/

	nv_wr32(dev, NV04_PGRAPH_STATE, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000100);
	/* no channel resident yet: park CTX_USER on id 15 */
	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= 15 << 24;
	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);

	/* These don't belong here, they're part of a per-channel context */
	nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_BETA_AND, 0xFFFFFFFF);

	return 0;
}
537
/* Stop PGRAPH.  Disables command fetch, waits for the engine to idle,
 * unloads any resident channel and masks interrupts.  On suspend an
 * engine that refuses to idle aborts the fini with -EBUSY (fetch is
 * re-enabled first); otherwise the hang is ignored.
 */
static int
nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
		nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
		return -EBUSY;
	}
	nv04_graph_unload_context(dev);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
	return 0;
}
550
551/*
552 * Software methods, why they are needed, and how they all work:
553 *
554 * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
555 * 2d engine settings are kept inside the grobjs themselves. The grobjs are
556 * 3 words long on both. grobj format on NV04 is:
557 *
558 * word 0:
559 * - bits 0-7: class
560 * - bit 12: color key active
561 * - bit 13: clip rect active
562 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
563 * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
564 * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
565 * NV03_CONTEXT_SURFACE_DST].
566 * - bits 15-17: 2d operation [aka patch config]
567 * - bit 24: patch valid [enables rendering using this object]
568 * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
569 * word 1:
570 * - bits 0-1: mono format
571 * - bits 8-13: color format
572 * - bits 16-31: DMA_NOTIFY instance
573 * word 2:
574 * - bits 0-15: DMA_A instance
575 * - bits 16-31: DMA_B instance
576 *
577 * On NV05 it's:
578 *
579 * word 0:
580 * - bits 0-7: class
581 * - bit 12: color key active
582 * - bit 13: clip rect active
583 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
584 * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
585 * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
586 * NV03_CONTEXT_SURFACE_DST].
587 * - bits 15-17: 2d operation [aka patch config]
588 * - bits 20-22: dither mode
589 * - bit 24: patch valid [enables rendering using this object]
590 * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
591 * - bit 26: surface_src/surface_zeta valid
592 * - bit 27: pattern valid
593 * - bit 28: rop valid
594 * - bit 29: beta1 valid
595 * - bit 30: beta4 valid
596 * word 1:
597 * - bits 0-1: mono format
598 * - bits 8-13: color format
599 * - bits 16-31: DMA_NOTIFY instance
600 * word 2:
601 * - bits 0-15: DMA_A instance
602 * - bits 16-31: DMA_B instance
603 *
604 * NV05 will set/unset the relevant valid bits when you poke the relevant
605 * object-binding methods with object of the proper type, or with the NULL
606 * type. It'll only allow rendering using the grobj if all needed objects
607 * are bound. The needed set of objects depends on selected operation: for
608 * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
609 *
610 * NV04 doesn't have these methods implemented at all, and doesn't have the
611 * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
612 * is set. So we have to emulate them in software, internally keeping the
613 * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
614 * but the last word isn't actually used for anything, we abuse it for this
615 * purpose.
616 *
617 * Actually, NV05 can optionally check bit 24 too, but we disable this since
618 * there's no use for it.
619 *
620 * For unknown reasons, NV04 implements surf3d binding in hardware as an
621 * exception. Also for unknown reasons, NV04 doesn't implement the clipping
622 * methods on the surf3d object, so we have to emulate them too.
623 */
624
/* Read-modify-write word 0 (ctx1) of the grobj bound on the subchannel
 * that trapped, updating the in-memory copy as well as the hardware's
 * cached copies (CTX_SWITCH1 and the per-subchannel CTX_CACHE1 slot).
 * See the big comment above for the ctx1 bit layout.
 */
static void
nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
{
	struct drm_device *dev = chan->dev;
	/* instance address of the currently-bound grobj */
	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
	/* subchannel that generated the trap */
	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
	u32 tmp;

	tmp  = nv_ri32(dev, instance);
	tmp &= ~mask;
	tmp |= value;

	nv_wi32(dev, instance, tmp);
	nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
	nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
}
641
/* Update the software-emulated NV05-style "valid" bits stored in the
 * unused last word (offset 0xc) of the bound grobj, then recompute and
 * set the patch-valid bit (ctx1 bit 24) based on which objects the
 * current 2d operation requires.  See the big comment above.
 */
static void
nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
{
	struct drm_device *dev = chan->dev;
	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
	u32 tmp, ctx1;
	int class, op, valid = 1;

	ctx1 = nv_ri32(dev, instance);
	class = ctx1 & 0xff;
	op = (ctx1 >> 15) & 7;	/* 2d operation, aka patch config */

	/* apply the requested change to the shadow valid-bits word */
	tmp = nv_ri32(dev, instance + 0xc);
	tmp &= ~mask;
	tmp |= value;
	nv_wi32(dev, instance + 0xc, tmp);

	/* check for valid surf2d/surf_dst/surf_color */
	if (!(tmp & 0x02000000))
		valid = 0;
	/* check for valid surf_src/surf_zeta */
	if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
		valid = 0;

	switch (op) {
	/* SRCCOPY_AND, SRCCOPY: no extra objects required */
	case 0:
	case 3:
		break;
	/* ROP_AND: requires pattern and rop */
	case 1:
		if (!(tmp & 0x18000000))
			valid = 0;
		break;
	/* BLEND_AND: requires beta1 */
	case 2:
		if (!(tmp & 0x20000000))
			valid = 0;
		break;
	/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
	case 4:
	case 5:
		if (!(tmp & 0x40000000))
			valid = 0;
		break;
	}

	/* rendering with this grobj is only allowed when bit 24 is set */
	nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
}
690
691static int
692nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
693 u32 class, u32 mthd, u32 data)
694{
695 if (data > 5)
696 return 1;
697 /* Old versions of the objects only accept first three operations. */
698 if (data > 2 && class < 0x40)
699 return 1;
700 nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
701 /* changing operation changes set of objects needed for validation */
702 nv04_graph_set_ctx_val(chan, 0, 0);
703 return 0;
704}
705
706static int
707nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
708 u32 class, u32 mthd, u32 data)
709{
710 uint32_t min = data & 0xffff, max;
711 uint32_t w = data >> 16;
712 if (min & 0x8000)
713 /* too large */
714 return 1;
715 if (w & 0x8000)
716 /* yes, it accepts negative for some reason. */
717 w |= 0xffff0000;
718 max = min + w;
719 max &= 0x3ffff;
720 nv_wr32(chan->dev, 0x40053c, min);
721 nv_wr32(chan->dev, 0x400544, max);
722 return 0;
723}
724
725static int
726nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
727 u32 class, u32 mthd, u32 data)
728{
729 uint32_t min = data & 0xffff, max;
730 uint32_t w = data >> 16;
731 if (min & 0x8000)
732 /* too large */
733 return 1;
734 if (w & 0x8000)
735 /* yes, it accepts negative for some reason. */
736 w |= 0xffff0000;
737 max = min + w;
738 max &= 0x3ffff;
739 nv_wr32(chan->dev, 0x400540, min);
740 nv_wr32(chan->dev, 0x400548, max);
741 return 0;
742}
743
744static int
745nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
746 u32 class, u32 mthd, u32 data)
747{
748 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
749 case 0x30:
750 nv04_graph_set_ctx1(chan, 0x00004000, 0);
751 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
752 return 0;
753 case 0x42:
754 nv04_graph_set_ctx1(chan, 0x00004000, 0);
755 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
756 return 0;
757 }
758 return 1;
759}
760
761static int
762nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
763 u32 class, u32 mthd, u32 data)
764{
765 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
766 case 0x30:
767 nv04_graph_set_ctx1(chan, 0x00004000, 0);
768 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
769 return 0;
770 case 0x42:
771 nv04_graph_set_ctx1(chan, 0x00004000, 0);
772 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
773 return 0;
774 case 0x52:
775 nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
776 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
777 return 0;
778 }
779 return 1;
780}
781
782static int
783nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
784 u32 class, u32 mthd, u32 data)
785{
786 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
787 case 0x30:
788 nv04_graph_set_ctx_val(chan, 0x08000000, 0);
789 return 0;
790 case 0x18:
791 nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
792 return 0;
793 }
794 return 1;
795}
796
797static int
798nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
799 u32 class, u32 mthd, u32 data)
800{
801 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
802 case 0x30:
803 nv04_graph_set_ctx_val(chan, 0x08000000, 0);
804 return 0;
805 case 0x44:
806 nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
807 return 0;
808 }
809 return 1;
810}
811
812static int
813nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
814 u32 class, u32 mthd, u32 data)
815{
816 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
817 case 0x30:
818 nv04_graph_set_ctx_val(chan, 0x10000000, 0);
819 return 0;
820 case 0x43:
821 nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
822 return 0;
823 }
824 return 1;
825}
826
827static int
828nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
829 u32 class, u32 mthd, u32 data)
830{
831 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
832 case 0x30:
833 nv04_graph_set_ctx_val(chan, 0x20000000, 0);
834 return 0;
835 case 0x12:
836 nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
837 return 0;
838 }
839 return 1;
840}
841
842static int
843nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
844 u32 class, u32 mthd, u32 data)
845{
846 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
847 case 0x30:
848 nv04_graph_set_ctx_val(chan, 0x40000000, 0);
849 return 0;
850 case 0x72:
851 nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
852 return 0;
853 }
854 return 1;
855}
856
857static int
858nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
859 u32 class, u32 mthd, u32 data)
860{
861 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
862 case 0x30:
863 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
864 return 0;
865 case 0x58:
866 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
867 return 0;
868 }
869 return 1;
870}
871
872static int
873nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
874 u32 class, u32 mthd, u32 data)
875{
876 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
877 case 0x30:
878 nv04_graph_set_ctx_val(chan, 0x04000000, 0);
879 return 0;
880 case 0x59:
881 nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
882 return 0;
883 }
884 return 1;
885}
886
887static int
888nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
889 u32 class, u32 mthd, u32 data)
890{
891 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
892 case 0x30:
893 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
894 return 0;
895 case 0x5a:
896 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
897 return 0;
898 }
899 return 1;
900}
901
902static int
903nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
904 u32 class, u32 mthd, u32 data)
905{
906 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
907 case 0x30:
908 nv04_graph_set_ctx_val(chan, 0x04000000, 0);
909 return 0;
910 case 0x5b:
911 nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
912 return 0;
913 }
914 return 1;
915}
916
917static int
918nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
919 u32 class, u32 mthd, u32 data)
920{
921 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
922 case 0x30:
923 nv04_graph_set_ctx1(chan, 0x2000, 0);
924 return 0;
925 case 0x19:
926 nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
927 return 0;
928 }
929 return 1;
930}
931
932static int
933nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
934 u32 class, u32 mthd, u32 data)
935{
936 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
937 case 0x30:
938 nv04_graph_set_ctx1(chan, 0x1000, 0);
939 return 0;
940 /* Yes, for some reason even the old versions of objects
941 * accept 0x57 and not 0x17. Consistency be damned.
942 */
943 case 0x57:
944 nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
945 return 0;
946 }
947 return 1;
948}
949
950static struct nouveau_bitfield nv04_graph_intr[] = {
951 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
952 {}
953};
954
955static struct nouveau_bitfield nv04_graph_nstatus[] = {
956 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
957 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
958 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
959 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
960 {}
961};
962
963struct nouveau_bitfield nv04_graph_nsource[] = {
964 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
965 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
966 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
967 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
968 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
969 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
970 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
971 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
972 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
973 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
974 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
975 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
976 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
977 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
978 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
979 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
980 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
981 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
982 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
983 {}
984};
985
986static void
987nv04_graph_context_switch(struct drm_device *dev)
988{
989 struct drm_nouveau_private *dev_priv = dev->dev_private;
990 struct nouveau_channel *chan = NULL;
991 int chid;
992
993 nouveau_wait_for_idle(dev);
994
995 /* If previous context is valid, we need to save it */
996 nv04_graph_unload_context(dev);
997
998 /* Load context for next channel */
999 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
1000 NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
1001 chan = dev_priv->channels.ptr[chid];
1002 if (chan)
1003 nv04_graph_load_context(chan);
1004}
1005
1006static void
1007nv04_graph_isr(struct drm_device *dev)
1008{
1009 u32 stat;
1010
1011 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1012 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
1013 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
1014 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
1015 u32 chid = (addr & 0x0f000000) >> 24;
1016 u32 subc = (addr & 0x0000e000) >> 13;
1017 u32 mthd = (addr & 0x00001ffc);
1018 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
1019 u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
1020 u32 show = stat;
1021
1022 if (stat & NV_PGRAPH_INTR_NOTIFY) {
1023 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
1024 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
1025 show &= ~NV_PGRAPH_INTR_NOTIFY;
1026 }
1027 }
1028
1029 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1030 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1031 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1032 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1033 nv04_graph_context_switch(dev);
1034 }
1035
1036 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
1037 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
1038
1039 if (show && nouveau_ratelimit()) {
1040 NV_INFO(dev, "PGRAPH -");
1041 nouveau_bitfield_print(nv04_graph_intr, show);
1042 printk(" nsource:");
1043 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1044 printk(" nstatus:");
1045 nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
1046 printk("\n");
1047 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
1048 "mthd 0x%04x data 0x%08x\n",
1049 chid, subc, class, mthd, data);
1050 }
1051 }
1052}
1053
1054static void
1055nv04_graph_destroy(struct drm_device *dev, int engine)
1056{
1057 struct nv04_graph_engine *pgraph = nv_engine(dev, engine);
1058
1059 nouveau_irq_unregister(dev, 12);
1060
1061 NVOBJ_ENGINE_DEL(dev, GR);
1062 kfree(pgraph);
1063}
1064
1065int
1066nv04_graph_create(struct drm_device *dev)
1067{
1068 struct nv04_graph_engine *pgraph;
1069
1070 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
1071 if (!pgraph)
1072 return -ENOMEM;
1073
1074 pgraph->base.destroy = nv04_graph_destroy;
1075 pgraph->base.init = nv04_graph_init;
1076 pgraph->base.fini = nv04_graph_fini;
1077 pgraph->base.context_new = nv04_graph_context_new;
1078 pgraph->base.context_del = nv04_graph_context_del;
1079 pgraph->base.object_new = nv04_graph_object_new;
1080
1081 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
1082 nouveau_irq_register(dev, 12, nv04_graph_isr);
1083
1084 /* dvd subpicture */
1085 NVOBJ_CLASS(dev, 0x0038, GR);
1086
1087 /* m2mf */
1088 NVOBJ_CLASS(dev, 0x0039, GR);
1089
1090 /* nv03 gdirect */
1091 NVOBJ_CLASS(dev, 0x004b, GR);
1092 NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
1093 NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
1094 NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
1095 NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
1096 NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
1097
1098 /* nv04 gdirect */
1099 NVOBJ_CLASS(dev, 0x004a, GR);
1100 NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1101 NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
1102 NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
1103 NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
1104 NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
1105 NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
1106
1107 /* nv01 imageblit */
1108 NVOBJ_CLASS(dev, 0x001f, GR);
1109 NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
1110 NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
1111 NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
1112 NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
1113 NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
1114 NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
1115 NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
1116 NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
1117
1118 /* nv04 imageblit */
1119 NVOBJ_CLASS(dev, 0x005f, GR);
1120 NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
1121 NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
1122 NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
1123 NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
1124 NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
1125 NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
1126 NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
1127 NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
1128
1129 /* nv04 iifc */
1130 NVOBJ_CLASS(dev, 0x0060, GR);
1131 NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
1132 NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
1133 NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
1134 NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
1135 NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
1136 NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
1137 NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
1138 NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
1139
1140 /* nv05 iifc */
1141 NVOBJ_CLASS(dev, 0x0064, GR);
1142
1143 /* nv01 ifc */
1144 NVOBJ_CLASS(dev, 0x0021, GR);
1145 NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
1146 NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
1147 NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
1148 NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
1149 NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
1150 NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
1151 NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
1152
1153 /* nv04 ifc */
1154 NVOBJ_CLASS(dev, 0x0061, GR);
1155 NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
1156 NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
1157 NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
1158 NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
1159 NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
1160 NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
1161 NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
1162 NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
1163
1164 /* nv05 ifc */
1165 NVOBJ_CLASS(dev, 0x0065, GR);
1166
1167 /* nv03 sifc */
1168 NVOBJ_CLASS(dev, 0x0036, GR);
1169 NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
1170 NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1171 NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
1172 NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
1173 NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
1174 NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
1175
1176 /* nv04 sifc */
1177 NVOBJ_CLASS(dev, 0x0076, GR);
1178 NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
1179 NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1180 NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
1181 NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
1182 NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
1183 NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
1184 NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
1185
1186 /* nv05 sifc */
1187 NVOBJ_CLASS(dev, 0x0066, GR);
1188
1189 /* nv03 sifm */
1190 NVOBJ_CLASS(dev, 0x0037, GR);
1191 NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1192 NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
1193 NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
1194 NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
1195 NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
1196
1197 /* nv04 sifm */
1198 NVOBJ_CLASS(dev, 0x0077, GR);
1199 NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1200 NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
1201 NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
1202 NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
1203 NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
1204 NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
1205
1206 /* null */
1207 NVOBJ_CLASS(dev, 0x0030, GR);
1208
1209 /* surf2d */
1210 NVOBJ_CLASS(dev, 0x0042, GR);
1211
1212 /* rop */
1213 NVOBJ_CLASS(dev, 0x0043, GR);
1214
1215 /* beta1 */
1216 NVOBJ_CLASS(dev, 0x0012, GR);
1217
1218 /* beta4 */
1219 NVOBJ_CLASS(dev, 0x0072, GR);
1220
1221 /* cliprect */
1222 NVOBJ_CLASS(dev, 0x0019, GR);
1223
1224 /* nv01 pattern */
1225 NVOBJ_CLASS(dev, 0x0018, GR);
1226
1227 /* nv04 pattern */
1228 NVOBJ_CLASS(dev, 0x0044, GR);
1229
1230 /* swzsurf */
1231 NVOBJ_CLASS(dev, 0x0052, GR);
1232
1233 /* surf3d */
1234 NVOBJ_CLASS(dev, 0x0053, GR);
1235 NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
1236 NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
1237
1238 /* nv03 tex_tri */
1239 NVOBJ_CLASS(dev, 0x0048, GR);
1240 NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
1241 NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
1242 NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
1243
1244 /* tex_tri */
1245 NVOBJ_CLASS(dev, 0x0054, GR);
1246
1247 /* multitex_tri */
1248 NVOBJ_CLASS(dev, 0x0055, GR);
1249
1250 /* nv01 chroma */
1251 NVOBJ_CLASS(dev, 0x0017, GR);
1252
1253 /* nv04 chroma */
1254 NVOBJ_CLASS(dev, 0x0057, GR);
1255
1256 /* surf_dst */
1257 NVOBJ_CLASS(dev, 0x0058, GR);
1258
1259 /* surf_src */
1260 NVOBJ_CLASS(dev, 0x0059, GR);
1261
1262 /* surf_color */
1263 NVOBJ_CLASS(dev, 0x005a, GR);
1264
1265 /* surf_zeta */
1266 NVOBJ_CLASS(dev, 0x005b, GR);
1267
1268 /* nv01 line */
1269 NVOBJ_CLASS(dev, 0x001c, GR);
1270 NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
1271 NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1272 NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
1273 NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
1274 NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
1275 NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
1276
1277 /* nv04 line */
1278 NVOBJ_CLASS(dev, 0x005c, GR);
1279 NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
1280 NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1281 NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
1282 NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
1283 NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
1284 NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
1285 NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
1286
1287 /* nv01 tri */
1288 NVOBJ_CLASS(dev, 0x001d, GR);
1289 NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
1290 NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1291 NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
1292 NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
1293 NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
1294 NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
1295
1296 /* nv04 tri */
1297 NVOBJ_CLASS(dev, 0x005d, GR);
1298 NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
1299 NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1300 NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
1301 NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
1302 NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
1303 NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
1304 NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
1305
1306 /* nv01 rect */
1307 NVOBJ_CLASS(dev, 0x001e, GR);
1308 NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
1309 NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1310 NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
1311 NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
1312 NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
1313 NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
1314
1315 /* nv04 rect */
1316 NVOBJ_CLASS(dev, 0x005e, GR);
1317 NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
1318 NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1319 NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
1320 NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
1321 NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
1322 NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
1323 NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
1324
1325 return 0;
1326}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
deleted file mode 100644
index ef7a934a499a..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ /dev/null
@@ -1,193 +0,0 @@
1#include "drmP.h"
2#include "drm.h"
3
4#include "nouveau_drv.h"
5#include "nouveau_fifo.h"
6#include "nouveau_ramht.h"
7
8/* returns the size of fifo context */
9static int
10nouveau_fifo_ctx_size(struct drm_device *dev)
11{
12 struct drm_nouveau_private *dev_priv = dev->dev_private;
13
14 if (dev_priv->chipset >= 0x40)
15 return 128 * 32;
16 else
17 if (dev_priv->chipset >= 0x17)
18 return 64 * 32;
19 else
20 if (dev_priv->chipset >= 0x10)
21 return 32 * 32;
22
23 return 32 * 16;
24}
25
26int nv04_instmem_init(struct drm_device *dev)
27{
28 struct drm_nouveau_private *dev_priv = dev->dev_private;
29 struct nouveau_gpuobj *ramht = NULL;
30 u32 offset, length;
31 int ret;
32
33 /* RAMIN always available */
34 dev_priv->ramin_available = true;
35
36 /* Reserve space at end of VRAM for PRAMIN */
37 if (dev_priv->card_type >= NV_40) {
38 u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
39 u32 rsvd;
40
41 /* estimate grctx size, the magics come from nv40_grctx.c */
42 if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
43 else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
44 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
45 else rsvd = 0x4a40 * vs;
46 rsvd += 16 * 1024;
47 rsvd *= 32; /* per-channel */
48
49 rsvd += 512 * 1024; /* pci(e)gart table */
50 rsvd += 512 * 1024; /* object storage */
51
52 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
53 } else {
54 dev_priv->ramin_rsvd_vram = 512 * 1024;
55 }
56
57 /* Setup shared RAMHT */
58 ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
59 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
60 if (ret)
61 return ret;
62
63 ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
64 nouveau_gpuobj_ref(NULL, &ramht);
65 if (ret)
66 return ret;
67
68 /* And RAMRO */
69 ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
70 NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
71 if (ret)
72 return ret;
73
74 /* And RAMFC */
75 length = nouveau_fifo_ctx_size(dev);
76 switch (dev_priv->card_type) {
77 case NV_40:
78 offset = 0x20000;
79 break;
80 default:
81 offset = 0x11400;
82 break;
83 }
84
85 ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
86 NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
87 if (ret)
88 return ret;
89
90 /* Only allow space after RAMFC to be used for object allocation */
91 offset += length;
92
93 /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
94 * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
95 * ("new style" control) the upper 16-bits of 0x2220 points at this
96 * other mysterious table that's clobbering important things.
97 *
98 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
99 * smashed to pieces on us, so reserve 0x30000-0x40000 too..
100 */
101 if (dev_priv->card_type >= NV_40) {
102 if (offset < 0x40000)
103 offset = 0x40000;
104 }
105
106 ret = drm_mm_init(&dev_priv->ramin_heap, offset,
107 dev_priv->ramin_rsvd_vram - offset);
108 if (ret) {
109 NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
110 return ret;
111 }
112
113 return 0;
114}
115
116void
117nv04_instmem_takedown(struct drm_device *dev)
118{
119 struct drm_nouveau_private *dev_priv = dev->dev_private;
120
121 nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
122 nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
123 nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
124
125 if (drm_mm_initialized(&dev_priv->ramin_heap))
126 drm_mm_takedown(&dev_priv->ramin_heap);
127}
128
129int
130nv04_instmem_suspend(struct drm_device *dev)
131{
132 return 0;
133}
134
135void
136nv04_instmem_resume(struct drm_device *dev)
137{
138}
139
140int
141nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
142 u32 size, u32 align)
143{
144 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
145 struct drm_mm_node *ramin = NULL;
146
147 do {
148 if (drm_mm_pre_get(&dev_priv->ramin_heap))
149 return -ENOMEM;
150
151 spin_lock(&dev_priv->ramin_lock);
152 ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
153 if (ramin == NULL) {
154 spin_unlock(&dev_priv->ramin_lock);
155 return -ENOMEM;
156 }
157
158 ramin = drm_mm_get_block_atomic(ramin, size, align);
159 spin_unlock(&dev_priv->ramin_lock);
160 } while (ramin == NULL);
161
162 gpuobj->node = ramin;
163 gpuobj->vinst = ramin->start;
164 return 0;
165}
166
167void
168nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
169{
170 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
171
172 spin_lock(&dev_priv->ramin_lock);
173 drm_mm_put_block(gpuobj->node);
174 gpuobj->node = NULL;
175 spin_unlock(&dev_priv->ramin_lock);
176}
177
178int
179nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
180{
181 gpuobj->pinst = gpuobj->vinst;
182 return 0;
183}
184
185void
186nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
187{
188}
189
190void
191nv04_instmem_flush(struct drm_device *dev)
192{
193}
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
deleted file mode 100644
index 2af43a1cb2ec..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_mc.c
+++ /dev/null
@@ -1,24 +0,0 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv04_mc_init(struct drm_device *dev)
8{
9 /* Power up everything, resetting each individual unit will
10 * be done later if needed.
11 */
12
13 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
14
15 /* Disable PROM access. */
16 nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
17
18 return 0;
19}
20
21void
22nv04_mc_takedown(struct drm_device *dev)
23{
24}
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 6e7589918fa9..410be011c2f0 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -23,10 +23,15 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drm.h"
27#include "nouveau_reg.h"
27#include "nouveau_hw.h" 28#include "nouveau_hw.h"
28#include "nouveau_pm.h" 29#include "nouveau_pm.h"
29 30
31#include <subdev/bios/pll.h>
32#include <subdev/clock.h>
33#include <subdev/timer.h>
34
30int 35int
31nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) 36nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
32{ 37{
@@ -46,7 +51,7 @@ nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
46} 51}
47 52
48struct nv04_pm_clock { 53struct nv04_pm_clock {
49 struct pll_lims pll; 54 struct nvbios_pll pll;
50 struct nouveau_pll_vals calc; 55 struct nouveau_pll_vals calc;
51}; 56};
52 57
@@ -58,13 +63,16 @@ struct nv04_pm_state {
58static int 63static int
59calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk) 64calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
60{ 65{
66 struct nouveau_device *device = nouveau_dev(dev);
67 struct nouveau_bios *bios = nouveau_bios(device);
68 struct nouveau_clock *pclk = nouveau_clock(device);
61 int ret; 69 int ret;
62 70
63 ret = get_pll_limits(dev, id, &clk->pll); 71 ret = nvbios_pll_parse(bios, id, &clk->pll);
64 if (ret) 72 if (ret)
65 return ret; 73 return ret;
66 74
67 ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc); 75 ret = pclk->pll_calc(pclk, &clk->pll, khz, &clk->calc);
68 if (!ret) 76 if (!ret)
69 return -EINVAL; 77 return -EINVAL;
70 78
@@ -100,37 +108,38 @@ error:
100static void 108static void
101prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk) 109prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
102{ 110{
103 struct drm_nouveau_private *dev_priv = dev->dev_private; 111 struct nouveau_device *device = nouveau_dev(dev);
112 struct nouveau_clock *pclk = nouveau_clock(device);
104 u32 reg = clk->pll.reg; 113 u32 reg = clk->pll.reg;
105 114
106 /* thank the insane nouveau_hw_setpll() interface for this */ 115 /* thank the insane nouveau_hw_setpll() interface for this */
107 if (dev_priv->card_type >= NV_40) 116 if (device->card_type >= NV_40)
108 reg += 4; 117 reg += 4;
109 118
110 nouveau_hw_setpll(dev, reg, &clk->calc); 119 pclk->pll_prog(pclk, reg, &clk->calc);
111} 120}
112 121
113int 122int
114nv04_pm_clocks_set(struct drm_device *dev, void *pre_state) 123nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
115{ 124{
116 struct drm_nouveau_private *dev_priv = dev->dev_private; 125 struct nouveau_device *device = nouveau_dev(dev);
117 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 126 struct nouveau_timer *ptimer = nouveau_timer(device);
118 struct nv04_pm_state *state = pre_state; 127 struct nv04_pm_state *state = pre_state;
119 128
120 prog_pll(dev, &state->core); 129 prog_pll(dev, &state->core);
121 130
122 if (state->memory.pll.reg) { 131 if (state->memory.pll.reg) {
123 prog_pll(dev, &state->memory); 132 prog_pll(dev, &state->memory);
124 if (dev_priv->card_type < NV_30) { 133 if (device->card_type < NV_30) {
125 if (dev_priv->card_type == NV_20) 134 if (device->card_type == NV_20)
126 nv_mask(dev, 0x1002c4, 0, 1 << 20); 135 nv_mask(device, 0x1002c4, 0, 1 << 20);
127 136
128 /* Reset the DLLs */ 137 /* Reset the DLLs */
129 nv_mask(dev, 0x1002c0, 0, 1 << 8); 138 nv_mask(device, 0x1002c0, 0, 1 << 8);
130 } 139 }
131 } 140 }
132 141
133 ptimer->init(dev); 142 nv_ofuncs(ptimer)->init(nv_object(ptimer));
134 143
135 kfree(state); 144 kfree(state);
136 return 0; 145 return 0;
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
deleted file mode 100644
index 0c41abf48774..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_software.c
+++ /dev/null
@@ -1,147 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fence.h"
30#include "nouveau_software.h"
31#include "nouveau_hw.h"
32
33struct nv04_software_priv {
34 struct nouveau_software_priv base;
35};
36
37struct nv04_software_chan {
38 struct nouveau_software_chan base;
39};
40
41static int
42mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
43{
44
45 struct nouveau_page_flip_state state;
46
47 if (!nouveau_finish_page_flip(chan, &state)) {
48 nv_set_crtc_base(chan->dev, state.crtc, state.offset +
49 state.y * state.pitch +
50 state.x * state.bpp / 8);
51 }
52
53 return 0;
54}
55
56static int
57nv04_software_context_new(struct nouveau_channel *chan, int engine)
58{
59 struct nv04_software_chan *pch;
60
61 pch = kzalloc(sizeof(*pch), GFP_KERNEL);
62 if (!pch)
63 return -ENOMEM;
64
65 nouveau_software_context_new(&pch->base);
66 chan->engctx[engine] = pch;
67 return 0;
68}
69
70static void
71nv04_software_context_del(struct nouveau_channel *chan, int engine)
72{
73 struct nv04_software_chan *pch = chan->engctx[engine];
74 chan->engctx[engine] = NULL;
75 kfree(pch);
76}
77
78static int
79nv04_software_object_new(struct nouveau_channel *chan, int engine,
80 u32 handle, u16 class)
81{
82 struct drm_device *dev = chan->dev;
83 struct nouveau_gpuobj *obj = NULL;
84 int ret;
85
86 ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
87 if (ret)
88 return ret;
89 obj->engine = 0;
90 obj->class = class;
91
92 ret = nouveau_ramht_insert(chan, handle, obj);
93 nouveau_gpuobj_ref(NULL, &obj);
94 return ret;
95}
96
97static int
98nv04_software_init(struct drm_device *dev, int engine)
99{
100 return 0;
101}
102
103static int
104nv04_software_fini(struct drm_device *dev, int engine, bool suspend)
105{
106 return 0;
107}
108
109static void
110nv04_software_destroy(struct drm_device *dev, int engine)
111{
112 struct nv04_software_priv *psw = nv_engine(dev, engine);
113
114 NVOBJ_ENGINE_DEL(dev, SW);
115 kfree(psw);
116}
117
118int
119nv04_software_create(struct drm_device *dev)
120{
121 struct drm_nouveau_private *dev_priv = dev->dev_private;
122 struct nv04_software_priv *psw;
123
124 psw = kzalloc(sizeof(*psw), GFP_KERNEL);
125 if (!psw)
126 return -ENOMEM;
127
128 psw->base.base.destroy = nv04_software_destroy;
129 psw->base.base.init = nv04_software_init;
130 psw->base.base.fini = nv04_software_fini;
131 psw->base.base.context_new = nv04_software_context_new;
132 psw->base.base.context_del = nv04_software_context_del;
133 psw->base.base.object_new = nv04_software_object_new;
134 nouveau_software_create(&psw->base);
135
136 NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
137 if (dev_priv->card_type <= NV_04) {
138 NVOBJ_CLASS(dev, 0x006e, SW);
139 NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
140 NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
141 } else {
142 NVOBJ_CLASS(dev, 0x016e, SW);
143 NVOBJ_MTHD (dev, 0x016e, 0x0500, mthd_flip);
144 }
145
146 return 0;
147}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
deleted file mode 100644
index 55c945290e52..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_timer.c
+++ /dev/null
@@ -1,84 +0,0 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5#include "nouveau_hw.h"
6
7int
8nv04_timer_init(struct drm_device *dev)
9{
10 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 u32 m, n, d;
12
13 nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
14 nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
15
16 /* aim for 31.25MHz, which gives us nanosecond timestamps */
17 d = 1000000 / 32;
18
19 /* determine base clock for timer source */
20 if (dev_priv->chipset < 0x40) {
21 n = nouveau_hw_get_clock(dev, PLL_CORE);
22 } else
23 if (dev_priv->chipset == 0x40) {
24 /*XXX: figure this out */
25 n = 0;
26 } else {
27 n = dev_priv->crystal;
28 m = 1;
29 while (n < (d * 2)) {
30 n += (n / m);
31 m++;
32 }
33
34 nv_wr32(dev, 0x009220, m - 1);
35 }
36
37 if (!n) {
38 NV_WARN(dev, "PTIMER: unknown input clock freq\n");
39 if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
40 !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
41 nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1);
42 nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1);
43 }
44 return 0;
45 }
46
47 /* reduce ratio to acceptable values */
48 while (((n % 5) == 0) && ((d % 5) == 0)) {
49 n /= 5;
50 d /= 5;
51 }
52
53 while (((n % 2) == 0) && ((d % 2) == 0)) {
54 n /= 2;
55 d /= 2;
56 }
57
58 while (n > 0xffff || d > 0xffff) {
59 n >>= 1;
60 d >>= 1;
61 }
62
63 nv_wr32(dev, NV04_PTIMER_NUMERATOR, n);
64 nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d);
65 return 0;
66}
67
68u64
69nv04_timer_read(struct drm_device *dev)
70{
71 u32 hi, lo;
72
73 do {
74 hi = nv_rd32(dev, NV04_PTIMER_TIME_1);
75 lo = nv_rd32(dev, NV04_PTIMER_TIME_0);
76 } while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1));
77
78 return ((u64)hi << 32 | lo);
79}
80
81void
82nv04_timer_takedown(struct drm_device *dev)
83{
84}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 3eb605ddfd03..45c5c039e7e4 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -25,7 +25,8 @@
25 */ 25 */
26 26
27#include "drmP.h" 27#include "drmP.h"
28#include "nouveau_drv.h" 28#include "nouveau_drm.h"
29#include "nouveau_reg.h"
29#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
30#include "nouveau_connector.h" 31#include "nouveau_connector.h"
31#include "nouveau_crtc.h" 32#include "nouveau_crtc.h"
@@ -34,6 +35,8 @@
34 35
35#include "i2c/ch7006.h" 36#include "i2c/ch7006.h"
36 37
38#include <subdev/i2c.h>
39
37static struct i2c_board_info nv04_tv_encoder_info[] = { 40static struct i2c_board_info nv04_tv_encoder_info[] = {
38 { 41 {
39 I2C_BOARD_INFO("ch7006", 0x75), 42 I2C_BOARD_INFO("ch7006", 0x75),
@@ -49,8 +52,11 @@ static struct i2c_board_info nv04_tv_encoder_info[] = {
49 52
50int nv04_tv_identify(struct drm_device *dev, int i2c_index) 53int nv04_tv_identify(struct drm_device *dev, int i2c_index)
51{ 54{
52 return nouveau_i2c_identify(dev, "TV encoder", nv04_tv_encoder_info, 55 struct nouveau_drm *drm = nouveau_drm(dev);
53 NULL, i2c_index); 56 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
57
58 return i2c->identify(i2c, i2c_index, "TV encoder",
59 nv04_tv_encoder_info, NULL);
54} 60}
55 61
56 62
@@ -64,12 +70,12 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
64static void nv04_tv_dpms(struct drm_encoder *encoder, int mode) 70static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
65{ 71{
66 struct drm_device *dev = encoder->dev; 72 struct drm_device *dev = encoder->dev;
73 struct nouveau_drm *drm = nouveau_drm(dev);
67 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 74 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
68 struct drm_nouveau_private *dev_priv = dev->dev_private; 75 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
69 struct nv04_mode_state *state = &dev_priv->mode_reg;
70 uint8_t crtc1A; 76 uint8_t crtc1A;
71 77
72 NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n", 78 NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
73 mode, nv_encoder->dcb->index); 79 mode, nv_encoder->dcb->index);
74 80
75 state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK); 81 state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
@@ -94,8 +100,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
94 100
95static void nv04_tv_bind(struct drm_device *dev, int head, bool bind) 101static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
96{ 102{
97 struct drm_nouveau_private *dev_priv = dev->dev_private; 103 struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head];
98 struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head];
99 104
100 state->tv_setup = 0; 105 state->tv_setup = 0;
101 106
@@ -133,9 +138,8 @@ static void nv04_tv_mode_set(struct drm_encoder *encoder,
133 struct drm_display_mode *adjusted_mode) 138 struct drm_display_mode *adjusted_mode)
134{ 139{
135 struct drm_device *dev = encoder->dev; 140 struct drm_device *dev = encoder->dev;
136 struct drm_nouveau_private *dev_priv = dev->dev_private;
137 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 141 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
138 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 142 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
139 143
140 regp->tv_htotal = adjusted_mode->htotal; 144 regp->tv_htotal = adjusted_mode->htotal;
141 regp->tv_vtotal = adjusted_mode->vtotal; 145 regp->tv_vtotal = adjusted_mode->vtotal;
@@ -157,12 +161,13 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
157{ 161{
158 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 162 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
159 struct drm_device *dev = encoder->dev; 163 struct drm_device *dev = encoder->dev;
164 struct nouveau_drm *drm = nouveau_drm(dev);
160 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 165 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
161 struct drm_encoder_helper_funcs *helper = encoder->helper_private; 166 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
162 167
163 helper->dpms(encoder, DRM_MODE_DPMS_ON); 168 helper->dpms(encoder, DRM_MODE_DPMS_ON);
164 169
165 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", 170 NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
166 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, 171 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
167 '@' + ffs(nv_encoder->dcb->or)); 172 '@' + ffs(nv_encoder->dcb->or));
168} 173}
@@ -181,15 +186,16 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
181}; 186};
182 187
183int 188int
184nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry) 189nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
185{ 190{
186 struct nouveau_encoder *nv_encoder; 191 struct nouveau_encoder *nv_encoder;
187 struct drm_encoder *encoder; 192 struct drm_encoder *encoder;
188 struct drm_device *dev = connector->dev; 193 struct drm_device *dev = connector->dev;
189 struct drm_encoder_helper_funcs *hfuncs; 194 struct drm_encoder_helper_funcs *hfuncs;
190 struct drm_encoder_slave_funcs *sfuncs; 195 struct drm_encoder_slave_funcs *sfuncs;
191 struct nouveau_i2c_chan *i2c = 196 struct nouveau_drm *drm = nouveau_drm(dev);
192 nouveau_i2c_find(dev, entry->i2c_index); 197 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
198 struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
193 int type, ret; 199 int type, ret;
194 200
195 /* Ensure that we can talk to this encoder */ 201 /* Ensure that we can talk to this encoder */
@@ -221,7 +227,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
221 227
222 /* Run the slave-specific initialization */ 228 /* Run the slave-specific initialization */
223 ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), 229 ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
224 &i2c->adapter, &nv04_tv_encoder_info[type]); 230 &port->adapter, &nv04_tv_encoder_info[type]);
225 if (ret < 0) 231 if (ret < 0)
226 goto fail_cleanup; 232 goto fail_cleanup;
227 233
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
deleted file mode 100644
index 420b1608536d..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ /dev/null
@@ -1,104 +0,0 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6void
7nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
8 uint32_t size, uint32_t pitch, uint32_t flags)
9{
10 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
12
13 tile->addr = 0x80000000 | addr;
14 tile->limit = max(1u, addr + size) - 1;
15 tile->pitch = pitch;
16}
17
18void
19nv10_fb_free_tile_region(struct drm_device *dev, int i)
20{
21 struct drm_nouveau_private *dev_priv = dev->dev_private;
22 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
23
24 tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
25}
26
27void
28nv10_fb_set_tile_region(struct drm_device *dev, int i)
29{
30 struct drm_nouveau_private *dev_priv = dev->dev_private;
31 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
32
33 nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
34 nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
35 nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
36}
37
38int
39nv1a_fb_vram_init(struct drm_device *dev)
40{
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct pci_dev *bridge;
43 uint32_t mem, mib;
44
45 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
46 if (!bridge) {
47 NV_ERROR(dev, "no bridge device\n");
48 return 0;
49 }
50
51 if (dev_priv->chipset == 0x1a) {
52 pci_read_config_dword(bridge, 0x7c, &mem);
53 mib = ((mem >> 6) & 31) + 1;
54 } else {
55 pci_read_config_dword(bridge, 0x84, &mem);
56 mib = ((mem >> 4) & 127) + 1;
57 }
58
59 dev_priv->vram_size = mib * 1024 * 1024;
60 return 0;
61}
62
63int
64nv10_fb_vram_init(struct drm_device *dev)
65{
66 struct drm_nouveau_private *dev_priv = dev->dev_private;
67 u32 fifo_data = nv_rd32(dev, NV04_PFB_FIFO_DATA);
68 u32 cfg0 = nv_rd32(dev, 0x100200);
69
70 dev_priv->vram_size = fifo_data & NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
71
72 if (cfg0 & 0x00000001)
73 dev_priv->vram_type = NV_MEM_TYPE_DDR1;
74 else
75 dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
76
77 return 0;
78}
79
80int
81nv10_fb_init(struct drm_device *dev)
82{
83 struct drm_nouveau_private *dev_priv = dev->dev_private;
84 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
85 int i;
86
87 /* Turn all the tiling regions off. */
88 pfb->num_tiles = NV10_PFB_TILE__SIZE;
89 for (i = 0; i < pfb->num_tiles; i++)
90 pfb->set_tile_region(dev, i);
91
92 return 0;
93}
94
95void
96nv10_fb_takedown(struct drm_device *dev)
97{
98 struct drm_nouveau_private *dev_priv = dev->dev_private;
99 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
100 int i;
101
102 for (i = 0; i < pfb->num_tiles; i++)
103 pfb->free_tile_region(dev, i);
104}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 8a1b75009185..ce752bf5cc4e 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -22,10 +22,11 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/object.h>
26#include "nouveau_drv.h" 26#include <core/class.h>
27
28#include "nouveau_drm.h"
27#include "nouveau_dma.h" 29#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fence.h" 30#include "nouveau_fence.h"
30 31
31struct nv10_fence_chan { 32struct nv10_fence_chan {
@@ -39,7 +40,7 @@ struct nv10_fence_priv {
39 u32 sequence; 40 u32 sequence;
40}; 41};
41 42
42static int 43int
43nv10_fence_emit(struct nouveau_fence *fence) 44nv10_fence_emit(struct nouveau_fence *fence)
44{ 45{
45 struct nouveau_channel *chan = fence->channel; 46 struct nouveau_channel *chan = fence->channel;
@@ -60,15 +61,15 @@ nv10_fence_sync(struct nouveau_fence *fence,
60 return -ENODEV; 61 return -ENODEV;
61} 62}
62 63
63static int 64int
64nv17_fence_sync(struct nouveau_fence *fence, 65nv17_fence_sync(struct nouveau_fence *fence,
65 struct nouveau_channel *prev, struct nouveau_channel *chan) 66 struct nouveau_channel *prev, struct nouveau_channel *chan)
66{ 67{
67 struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE); 68 struct nv10_fence_priv *priv = chan->drm->fence;
68 u32 value; 69 u32 value;
69 int ret; 70 int ret;
70 71
71 if (!mutex_trylock(&prev->mutex)) 72 if (!mutex_trylock(&prev->cli->mutex))
72 return -EBUSY; 73 return -EBUSY;
73 74
74 spin_lock(&priv->lock); 75 spin_lock(&priv->lock);
@@ -95,34 +96,33 @@ nv17_fence_sync(struct nouveau_fence *fence,
95 FIRE_RING (chan); 96 FIRE_RING (chan);
96 } 97 }
97 98
98 mutex_unlock(&prev->mutex); 99 mutex_unlock(&prev->cli->mutex);
99 return 0; 100 return 0;
100} 101}
101 102
102static u32 103u32
103nv10_fence_read(struct nouveau_channel *chan) 104nv10_fence_read(struct nouveau_channel *chan)
104{ 105{
105 return nvchan_rd32(chan, 0x0048); 106 return nv_ro32(chan->object, 0x0048);
106} 107}
107 108
108static void 109void
109nv10_fence_context_del(struct nouveau_channel *chan, int engine) 110nv10_fence_context_del(struct nouveau_channel *chan)
110{ 111{
111 struct nv10_fence_chan *fctx = chan->engctx[engine]; 112 struct nv10_fence_chan *fctx = chan->fence;
112 nouveau_fence_context_del(&fctx->base); 113 nouveau_fence_context_del(&fctx->base);
113 chan->engctx[engine] = NULL; 114 chan->fence = NULL;
114 kfree(fctx); 115 kfree(fctx);
115} 116}
116 117
117static int 118static int
118nv10_fence_context_new(struct nouveau_channel *chan, int engine) 119nv10_fence_context_new(struct nouveau_channel *chan)
119{ 120{
120 struct nv10_fence_priv *priv = nv_engine(chan->dev, engine); 121 struct nv10_fence_priv *priv = chan->drm->fence;
121 struct nv10_fence_chan *fctx; 122 struct nv10_fence_chan *fctx;
122 struct nouveau_gpuobj *obj;
123 int ret = 0; 123 int ret = 0;
124 124
125 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); 125 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
126 if (!fctx) 126 if (!fctx)
127 return -ENOMEM; 127 return -ENOMEM;
128 128
@@ -130,69 +130,56 @@ nv10_fence_context_new(struct nouveau_channel *chan, int engine)
130 130
131 if (priv->bo) { 131 if (priv->bo) {
132 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 132 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
133 133 struct nouveau_object *object;
134 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY, 134 u32 start = mem->start * PAGE_SIZE;
135 mem->start * PAGE_SIZE, mem->size, 135 u32 limit = mem->start + mem->size - 1;
136 NV_MEM_ACCESS_RW, 136
137 NV_MEM_TARGET_VRAM, &obj); 137 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
138 if (!ret) { 138 NvSema, 0x0002,
139 ret = nouveau_ramht_insert(chan, NvSema, obj); 139 &(struct nv_dma_class) {
140 nouveau_gpuobj_ref(NULL, &obj); 140 .flags = NV_DMA_TARGET_VRAM |
141 } 141 NV_DMA_ACCESS_RDWR,
142 .start = start,
143 .limit = limit,
144 }, sizeof(struct nv_dma_class),
145 &object);
142 } 146 }
143 147
144 if (ret) 148 if (ret)
145 nv10_fence_context_del(chan, engine); 149 nv10_fence_context_del(chan);
146 return ret; 150 return ret;
147} 151}
148 152
149static int 153void
150nv10_fence_fini(struct drm_device *dev, int engine, bool suspend) 154nv10_fence_destroy(struct nouveau_drm *drm)
151{ 155{
152 return 0; 156 struct nv10_fence_priv *priv = drm->fence;
153} 157 nouveau_bo_unmap(priv->bo);
154
155static int
156nv10_fence_init(struct drm_device *dev, int engine)
157{
158 return 0;
159}
160
161static void
162nv10_fence_destroy(struct drm_device *dev, int engine)
163{
164 struct drm_nouveau_private *dev_priv = dev->dev_private;
165 struct nv10_fence_priv *priv = nv_engine(dev, engine);
166
167 nouveau_bo_ref(NULL, &priv->bo); 158 nouveau_bo_ref(NULL, &priv->bo);
168 dev_priv->eng[engine] = NULL; 159 drm->fence = NULL;
169 kfree(priv); 160 kfree(priv);
170} 161}
171 162
172int 163int
173nv10_fence_create(struct drm_device *dev) 164nv10_fence_create(struct nouveau_drm *drm)
174{ 165{
175 struct drm_nouveau_private *dev_priv = dev->dev_private;
176 struct nv10_fence_priv *priv; 166 struct nv10_fence_priv *priv;
177 int ret = 0; 167 int ret = 0;
178 168
179 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 169 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
180 if (!priv) 170 if (!priv)
181 return -ENOMEM; 171 return -ENOMEM;
182 172
183 priv->base.engine.destroy = nv10_fence_destroy; 173 priv->base.dtor = nv10_fence_destroy;
184 priv->base.engine.init = nv10_fence_init; 174 priv->base.context_new = nv10_fence_context_new;
185 priv->base.engine.fini = nv10_fence_fini; 175 priv->base.context_del = nv10_fence_context_del;
186 priv->base.engine.context_new = nv10_fence_context_new;
187 priv->base.engine.context_del = nv10_fence_context_del;
188 priv->base.emit = nv10_fence_emit; 176 priv->base.emit = nv10_fence_emit;
189 priv->base.read = nv10_fence_read; 177 priv->base.read = nv10_fence_read;
190 priv->base.sync = nv10_fence_sync; 178 priv->base.sync = nv10_fence_sync;
191 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
192 spin_lock_init(&priv->lock); 179 spin_lock_init(&priv->lock);
193 180
194 if (dev_priv->chipset >= 0x17) { 181 if (nv_device(drm->device)->chipset >= 0x17) {
195 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 182 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
196 0, 0x0000, NULL, &priv->bo); 183 0, 0x0000, NULL, &priv->bo);
197 if (!ret) { 184 if (!ret) {
198 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 185 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
@@ -209,6 +196,6 @@ nv10_fence_create(struct drm_device *dev)
209 } 196 }
210 197
211 if (ret) 198 if (ret)
212 nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE); 199 nv10_fence_destroy(drm);
213 return ret; 200 return ret;
214} 201}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
deleted file mode 100644
index f1fe7d758241..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ /dev/null
@@ -1,138 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_util.h"
32#include "nouveau_ramht.h"
33
34static struct ramfc_desc {
35 unsigned bits:6;
36 unsigned ctxs:5;
37 unsigned ctxp:8;
38 unsigned regs:5;
39 unsigned regp;
40} nv10_ramfc[] = {
41 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
42 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
43 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
44 { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
45 { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
46 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
47 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
48 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
49 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
50 {}
51};
52
53struct nv10_fifo_priv {
54 struct nouveau_fifo_priv base;
55 struct ramfc_desc *ramfc_desc;
56};
57
58struct nv10_fifo_chan {
59 struct nouveau_fifo_chan base;
60 struct nouveau_gpuobj *ramfc;
61};
62
63static int
64nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
65{
66 struct drm_device *dev = chan->dev;
67 struct drm_nouveau_private *dev_priv = dev->dev_private;
68 struct nv10_fifo_priv *priv = nv_engine(dev, engine);
69 struct nv10_fifo_chan *fctx;
70 unsigned long flags;
71 int ret;
72
73 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
74 if (!fctx)
75 return -ENOMEM;
76
77 /* map channel control registers */
78 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
79 NV03_USER(chan->id), PAGE_SIZE);
80 if (!chan->user) {
81 ret = -ENOMEM;
82 goto error;
83 }
84
85 /* initialise default fifo context */
86 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
87 chan->id * 32, ~0, 32,
88 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
89 if (ret)
90 goto error;
91
92 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
93 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
94 nv_wo32(fctx->ramfc, 0x08, 0x00000000);
95 nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
96 nv_wo32(fctx->ramfc, 0x10, 0x00000000);
97 nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
98 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
99#ifdef __BIG_ENDIAN
100 NV_PFIFO_CACHE1_BIG_ENDIAN |
101#endif
102 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
103 nv_wo32(fctx->ramfc, 0x18, 0x00000000);
104 nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
105
106 /* enable dma mode on the channel */
107 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
108 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
109 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
110
111error:
112 if (ret)
113 priv->base.base.context_del(chan, engine);
114 return ret;
115}
116
117int
118nv10_fifo_create(struct drm_device *dev)
119{
120 struct drm_nouveau_private *dev_priv = dev->dev_private;
121 struct nv10_fifo_priv *priv;
122
123 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
124 if (!priv)
125 return -ENOMEM;
126
127 priv->base.base.destroy = nv04_fifo_destroy;
128 priv->base.base.init = nv04_fifo_init;
129 priv->base.base.fini = nv04_fifo_fini;
130 priv->base.base.context_new = nv10_fifo_context_new;
131 priv->base.base.context_del = nv04_fifo_context_del;
132 priv->base.channels = 31;
133 priv->ramfc_desc = nv10_ramfc;
134 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
135
136 nouveau_irq_register(dev, 8, nv04_fifo_isr);
137 return 0;
138}
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
deleted file mode 100644
index 9d79180069df..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_hw.h"
30#include "nouveau_gpio.h"
31
32int
33nv10_gpio_sense(struct drm_device *dev, int line)
34{
35 if (line < 2) {
36 line = line * 16;
37 line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
38 return !!(line & 0x0100);
39 } else
40 if (line < 10) {
41 line = (line - 2) * 4;
42 line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
43 return !!(line & 0x04);
44 } else
45 if (line < 14) {
46 line = (line - 10) * 4;
47 line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
48 return !!(line & 0x04);
49 }
50
51 return -EINVAL;
52}
53
54int
55nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
56{
57 u32 reg, mask, data;
58
59 if (line < 2) {
60 line = line * 16;
61 reg = NV_PCRTC_GPIO;
62 mask = 0x00000011;
63 data = (dir << 4) | out;
64 } else
65 if (line < 10) {
66 line = (line - 2) * 4;
67 reg = NV_PCRTC_GPIO_EXT;
68 mask = 0x00000003;
69 data = (dir << 1) | out;
70 } else
71 if (line < 14) {
72 line = (line - 10) * 4;
73 reg = NV_PCRTC_850;
74 mask = 0x00000003;
75 data = (dir << 1) | out;
76 } else {
77 return -EINVAL;
78 }
79
80 mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
81 NVWriteCRTC(dev, 0, reg, mask | (data << line));
82 return 0;
83}
84
85void
86nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
87{
88 u32 mask = 0x00010001 << line;
89
90 nv_wr32(dev, 0x001104, mask);
91 nv_mask(dev, 0x001144, mask, on ? mask : 0);
92}
93
94static void
95nv10_gpio_isr(struct drm_device *dev)
96{
97 u32 intr = nv_rd32(dev, 0x1104);
98 u32 hi = (intr & 0x0000ffff) >> 0;
99 u32 lo = (intr & 0xffff0000) >> 16;
100
101 nouveau_gpio_isr(dev, 0, hi | lo);
102
103 nv_wr32(dev, 0x001104, intr);
104}
105
106int
107nv10_gpio_init(struct drm_device *dev)
108{
109 nv_wr32(dev, 0x001140, 0x00000000);
110 nv_wr32(dev, 0x001100, 0xffffffff);
111 nv_wr32(dev, 0x001144, 0x00000000);
112 nv_wr32(dev, 0x001104, 0xffffffff);
113 nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
114 return 0;
115}
116
117void
118nv10_gpio_fini(struct drm_device *dev)
119{
120 nv_wr32(dev, 0x001140, 0x00000000);
121 nv_wr32(dev, 0x001144, 0x00000000);
122 nouveau_irq_unregister(dev, 28);
123}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
deleted file mode 100644
index fb1d88a951de..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ /dev/null
@@ -1,1189 +0,0 @@
1/*
2 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drm.h"
28#include "nouveau_drv.h"
29#include "nouveau_util.h"
30
31struct nv10_graph_engine {
32 struct nouveau_exec_engine base;
33};
34
35struct pipe_state {
36 uint32_t pipe_0x0000[0x040/4];
37 uint32_t pipe_0x0040[0x010/4];
38 uint32_t pipe_0x0200[0x0c0/4];
39 uint32_t pipe_0x4400[0x080/4];
40 uint32_t pipe_0x6400[0x3b0/4];
41 uint32_t pipe_0x6800[0x2f0/4];
42 uint32_t pipe_0x6c00[0x030/4];
43 uint32_t pipe_0x7000[0x130/4];
44 uint32_t pipe_0x7400[0x0c0/4];
45 uint32_t pipe_0x7800[0x0c0/4];
46};
47
48static int nv10_graph_ctx_regs[] = {
49 NV10_PGRAPH_CTX_SWITCH(0),
50 NV10_PGRAPH_CTX_SWITCH(1),
51 NV10_PGRAPH_CTX_SWITCH(2),
52 NV10_PGRAPH_CTX_SWITCH(3),
53 NV10_PGRAPH_CTX_SWITCH(4),
54 NV10_PGRAPH_CTX_CACHE(0, 0),
55 NV10_PGRAPH_CTX_CACHE(0, 1),
56 NV10_PGRAPH_CTX_CACHE(0, 2),
57 NV10_PGRAPH_CTX_CACHE(0, 3),
58 NV10_PGRAPH_CTX_CACHE(0, 4),
59 NV10_PGRAPH_CTX_CACHE(1, 0),
60 NV10_PGRAPH_CTX_CACHE(1, 1),
61 NV10_PGRAPH_CTX_CACHE(1, 2),
62 NV10_PGRAPH_CTX_CACHE(1, 3),
63 NV10_PGRAPH_CTX_CACHE(1, 4),
64 NV10_PGRAPH_CTX_CACHE(2, 0),
65 NV10_PGRAPH_CTX_CACHE(2, 1),
66 NV10_PGRAPH_CTX_CACHE(2, 2),
67 NV10_PGRAPH_CTX_CACHE(2, 3),
68 NV10_PGRAPH_CTX_CACHE(2, 4),
69 NV10_PGRAPH_CTX_CACHE(3, 0),
70 NV10_PGRAPH_CTX_CACHE(3, 1),
71 NV10_PGRAPH_CTX_CACHE(3, 2),
72 NV10_PGRAPH_CTX_CACHE(3, 3),
73 NV10_PGRAPH_CTX_CACHE(3, 4),
74 NV10_PGRAPH_CTX_CACHE(4, 0),
75 NV10_PGRAPH_CTX_CACHE(4, 1),
76 NV10_PGRAPH_CTX_CACHE(4, 2),
77 NV10_PGRAPH_CTX_CACHE(4, 3),
78 NV10_PGRAPH_CTX_CACHE(4, 4),
79 NV10_PGRAPH_CTX_CACHE(5, 0),
80 NV10_PGRAPH_CTX_CACHE(5, 1),
81 NV10_PGRAPH_CTX_CACHE(5, 2),
82 NV10_PGRAPH_CTX_CACHE(5, 3),
83 NV10_PGRAPH_CTX_CACHE(5, 4),
84 NV10_PGRAPH_CTX_CACHE(6, 0),
85 NV10_PGRAPH_CTX_CACHE(6, 1),
86 NV10_PGRAPH_CTX_CACHE(6, 2),
87 NV10_PGRAPH_CTX_CACHE(6, 3),
88 NV10_PGRAPH_CTX_CACHE(6, 4),
89 NV10_PGRAPH_CTX_CACHE(7, 0),
90 NV10_PGRAPH_CTX_CACHE(7, 1),
91 NV10_PGRAPH_CTX_CACHE(7, 2),
92 NV10_PGRAPH_CTX_CACHE(7, 3),
93 NV10_PGRAPH_CTX_CACHE(7, 4),
94 NV10_PGRAPH_CTX_USER,
95 NV04_PGRAPH_DMA_START_0,
96 NV04_PGRAPH_DMA_START_1,
97 NV04_PGRAPH_DMA_LENGTH,
98 NV04_PGRAPH_DMA_MISC,
99 NV10_PGRAPH_DMA_PITCH,
100 NV04_PGRAPH_BOFFSET0,
101 NV04_PGRAPH_BBASE0,
102 NV04_PGRAPH_BLIMIT0,
103 NV04_PGRAPH_BOFFSET1,
104 NV04_PGRAPH_BBASE1,
105 NV04_PGRAPH_BLIMIT1,
106 NV04_PGRAPH_BOFFSET2,
107 NV04_PGRAPH_BBASE2,
108 NV04_PGRAPH_BLIMIT2,
109 NV04_PGRAPH_BOFFSET3,
110 NV04_PGRAPH_BBASE3,
111 NV04_PGRAPH_BLIMIT3,
112 NV04_PGRAPH_BOFFSET4,
113 NV04_PGRAPH_BBASE4,
114 NV04_PGRAPH_BLIMIT4,
115 NV04_PGRAPH_BOFFSET5,
116 NV04_PGRAPH_BBASE5,
117 NV04_PGRAPH_BLIMIT5,
118 NV04_PGRAPH_BPITCH0,
119 NV04_PGRAPH_BPITCH1,
120 NV04_PGRAPH_BPITCH2,
121 NV04_PGRAPH_BPITCH3,
122 NV04_PGRAPH_BPITCH4,
123 NV10_PGRAPH_SURFACE,
124 NV10_PGRAPH_STATE,
125 NV04_PGRAPH_BSWIZZLE2,
126 NV04_PGRAPH_BSWIZZLE5,
127 NV04_PGRAPH_BPIXEL,
128 NV10_PGRAPH_NOTIFY,
129 NV04_PGRAPH_PATT_COLOR0,
130 NV04_PGRAPH_PATT_COLOR1,
131 NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
132 0x00400904,
133 0x00400908,
134 0x0040090c,
135 0x00400910,
136 0x00400914,
137 0x00400918,
138 0x0040091c,
139 0x00400920,
140 0x00400924,
141 0x00400928,
142 0x0040092c,
143 0x00400930,
144 0x00400934,
145 0x00400938,
146 0x0040093c,
147 0x00400940,
148 0x00400944,
149 0x00400948,
150 0x0040094c,
151 0x00400950,
152 0x00400954,
153 0x00400958,
154 0x0040095c,
155 0x00400960,
156 0x00400964,
157 0x00400968,
158 0x0040096c,
159 0x00400970,
160 0x00400974,
161 0x00400978,
162 0x0040097c,
163 0x00400980,
164 0x00400984,
165 0x00400988,
166 0x0040098c,
167 0x00400990,
168 0x00400994,
169 0x00400998,
170 0x0040099c,
171 0x004009a0,
172 0x004009a4,
173 0x004009a8,
174 0x004009ac,
175 0x004009b0,
176 0x004009b4,
177 0x004009b8,
178 0x004009bc,
179 0x004009c0,
180 0x004009c4,
181 0x004009c8,
182 0x004009cc,
183 0x004009d0,
184 0x004009d4,
185 0x004009d8,
186 0x004009dc,
187 0x004009e0,
188 0x004009e4,
189 0x004009e8,
190 0x004009ec,
191 0x004009f0,
192 0x004009f4,
193 0x004009f8,
194 0x004009fc,
195 NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
196 0x0040080c,
197 NV04_PGRAPH_PATTERN_SHAPE,
198 NV03_PGRAPH_MONO_COLOR0,
199 NV04_PGRAPH_ROP3,
200 NV04_PGRAPH_CHROMA,
201 NV04_PGRAPH_BETA_AND,
202 NV04_PGRAPH_BETA_PREMULT,
203 0x00400e70,
204 0x00400e74,
205 0x00400e78,
206 0x00400e7c,
207 0x00400e80,
208 0x00400e84,
209 0x00400e88,
210 0x00400e8c,
211 0x00400ea0,
212 0x00400ea4,
213 0x00400ea8,
214 0x00400e90,
215 0x00400e94,
216 0x00400e98,
217 0x00400e9c,
218 NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
219 NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */
220 0x00400f04,
221 0x00400f24,
222 0x00400f08,
223 0x00400f28,
224 0x00400f0c,
225 0x00400f2c,
226 0x00400f10,
227 0x00400f30,
228 0x00400f14,
229 0x00400f34,
230 0x00400f18,
231 0x00400f38,
232 0x00400f1c,
233 0x00400f3c,
234 NV10_PGRAPH_XFMODE0,
235 NV10_PGRAPH_XFMODE1,
236 NV10_PGRAPH_GLOBALSTATE0,
237 NV10_PGRAPH_GLOBALSTATE1,
238 NV04_PGRAPH_STORED_FMT,
239 NV04_PGRAPH_SOURCE_COLOR,
240 NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */
241 NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */
242 0x00400404,
243 0x00400484,
244 0x00400408,
245 0x00400488,
246 0x0040040c,
247 0x0040048c,
248 0x00400410,
249 0x00400490,
250 0x00400414,
251 0x00400494,
252 0x00400418,
253 0x00400498,
254 0x0040041c,
255 0x0040049c,
256 0x00400420,
257 0x004004a0,
258 0x00400424,
259 0x004004a4,
260 0x00400428,
261 0x004004a8,
262 0x0040042c,
263 0x004004ac,
264 0x00400430,
265 0x004004b0,
266 0x00400434,
267 0x004004b4,
268 0x00400438,
269 0x004004b8,
270 0x0040043c,
271 0x004004bc,
272 0x00400440,
273 0x004004c0,
274 0x00400444,
275 0x004004c4,
276 0x00400448,
277 0x004004c8,
278 0x0040044c,
279 0x004004cc,
280 0x00400450,
281 0x004004d0,
282 0x00400454,
283 0x004004d4,
284 0x00400458,
285 0x004004d8,
286 0x0040045c,
287 0x004004dc,
288 0x00400460,
289 0x004004e0,
290 0x00400464,
291 0x004004e4,
292 0x00400468,
293 0x004004e8,
294 0x0040046c,
295 0x004004ec,
296 0x00400470,
297 0x004004f0,
298 0x00400474,
299 0x004004f4,
300 0x00400478,
301 0x004004f8,
302 0x0040047c,
303 0x004004fc,
304 NV03_PGRAPH_ABS_UCLIP_XMIN,
305 NV03_PGRAPH_ABS_UCLIP_XMAX,
306 NV03_PGRAPH_ABS_UCLIP_YMIN,
307 NV03_PGRAPH_ABS_UCLIP_YMAX,
308 0x00400550,
309 0x00400558,
310 0x00400554,
311 0x0040055c,
312 NV03_PGRAPH_ABS_UCLIPA_XMIN,
313 NV03_PGRAPH_ABS_UCLIPA_XMAX,
314 NV03_PGRAPH_ABS_UCLIPA_YMIN,
315 NV03_PGRAPH_ABS_UCLIPA_YMAX,
316 NV03_PGRAPH_ABS_ICLIP_XMAX,
317 NV03_PGRAPH_ABS_ICLIP_YMAX,
318 NV03_PGRAPH_XY_LOGIC_MISC0,
319 NV03_PGRAPH_XY_LOGIC_MISC1,
320 NV03_PGRAPH_XY_LOGIC_MISC2,
321 NV03_PGRAPH_XY_LOGIC_MISC3,
322 NV03_PGRAPH_CLIPX_0,
323 NV03_PGRAPH_CLIPX_1,
324 NV03_PGRAPH_CLIPY_0,
325 NV03_PGRAPH_CLIPY_1,
326 NV10_PGRAPH_COMBINER0_IN_ALPHA,
327 NV10_PGRAPH_COMBINER1_IN_ALPHA,
328 NV10_PGRAPH_COMBINER0_IN_RGB,
329 NV10_PGRAPH_COMBINER1_IN_RGB,
330 NV10_PGRAPH_COMBINER_COLOR0,
331 NV10_PGRAPH_COMBINER_COLOR1,
332 NV10_PGRAPH_COMBINER0_OUT_ALPHA,
333 NV10_PGRAPH_COMBINER1_OUT_ALPHA,
334 NV10_PGRAPH_COMBINER0_OUT_RGB,
335 NV10_PGRAPH_COMBINER1_OUT_RGB,
336 NV10_PGRAPH_COMBINER_FINAL0,
337 NV10_PGRAPH_COMBINER_FINAL1,
338 0x00400e00,
339 0x00400e04,
340 0x00400e08,
341 0x00400e0c,
342 0x00400e10,
343 0x00400e14,
344 0x00400e18,
345 0x00400e1c,
346 0x00400e20,
347 0x00400e24,
348 0x00400e28,
349 0x00400e2c,
350 0x00400e30,
351 0x00400e34,
352 0x00400e38,
353 0x00400e3c,
354 NV04_PGRAPH_PASSTHRU_0,
355 NV04_PGRAPH_PASSTHRU_1,
356 NV04_PGRAPH_PASSTHRU_2,
357 NV10_PGRAPH_DIMX_TEXTURE,
358 NV10_PGRAPH_WDIMX_TEXTURE,
359 NV10_PGRAPH_DVD_COLORFMT,
360 NV10_PGRAPH_SCALED_FORMAT,
361 NV04_PGRAPH_MISC24_0,
362 NV04_PGRAPH_MISC24_1,
363 NV04_PGRAPH_MISC24_2,
364 NV03_PGRAPH_X_MISC,
365 NV03_PGRAPH_Y_MISC,
366 NV04_PGRAPH_VALID1,
367 NV04_PGRAPH_VALID2,
368};
369
/* Extra PGRAPH context registers saved/restored only on nv17+ chipsets
 * (chipset >= 0x17 checks below), in addition to nv10_graph_ctx_regs[]. */
static int nv17_graph_ctx_regs[] = {
	NV10_PGRAPH_DEBUG_4,
	0x004006b0,
	0x00400eac,
	0x00400eb0,
	0x00400eb4,
	0x00400eb8,
	0x00400ebc,
	0x00400ec0,
	0x00400ec4,
	0x00400ec8,
	0x00400ecc,
	0x00400ed0,
	0x00400ed4,
	0x00400ed8,
	0x00400edc,
	0x00400ee0,
	0x00400a00,
	0x00400a04,
};
390
/* Per-channel software copy of the PGRAPH context. */
struct graph_state {
	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];	/* common context regs */
	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];	/* nv17+ only context regs */
	struct pipe_state pipe_state;			/* 3D pipe state image */
	uint32_t lma_window[4];				/* LMA window mthd args (0x1638..0x1644) */
};
397
/* Read ARRAY_SIZE(state) words of pipe state starting at pipe address
 * 'addr' into the 'state' array (must be a real array, not a pointer). */
#define PIPE_SAVE(dev, state, addr)					\
	do {								\
		int __i;						\
		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
			state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA);	\
	} while (0)
405
/* Write the ARRAY_SIZE(state) words in 'state' back to the pipe,
 * starting at pipe address 'addr'. */
#define PIPE_RESTORE(dev, state, addr)					\
	do {								\
		int __i;						\
		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
			nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]);	\
	} while (0)
413
/* Snapshot all pipe state regions of the current hardware context into
 * the channel's software context image. */
static void nv10_graph_save_pipe(struct nouveau_channel *chan)
{
	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
	struct drm_device *dev = chan->dev;

	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
	PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
	PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
	PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
	PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
	PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
	PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
	PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
	PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
}
431
/* Load the channel's saved pipe state back into hardware.
 *
 * XFMODE0/1 are parked at fixed values while the pipe is reloaded and
 * restored afterwards; the write sequence (0x0200 first, 0x0040/0x0000
 * last) is order-sensitive — do not reorder. */
static void nv10_graph_load_pipe(struct nouveau_channel *chan)
{
	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
	struct drm_device *dev = chan->dev;
	uint32_t xfmode0, xfmode1;
	int i;

	nouveau_wait_for_idle(dev);
	/* XXX check haiku comments */
	/* Save current XFMODE and force known values while poking the pipe */
	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
	/* Prime 0x64c0: four 1.0f values followed by four zeros */
	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);


	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
	nouveau_wait_for_idle(dev);

	/* restore XFMODE */
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
	PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
	PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
	PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
	PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
	PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
	PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
	PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
	PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
	nouveau_wait_for_idle(dev);
}
481
/* Fill the channel's software pipe-state image with default (power-on
 * style) values.  Nothing is written to hardware here; the image is
 * later uploaded by nv10_graph_load_pipe().  PIPE_INIT_END() verifies
 * each region was filled with exactly the expected number of words. */
static void nv10_graph_create_pipe(struct nouveau_channel *chan)
{
	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
	struct drm_device *dev = chan->dev;
	uint32_t *fifo_pipe_state_addr;
	int i;
#define PIPE_INIT(addr) \
	do { \
		fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
	} while (0)
#define PIPE_INIT_END(addr) \
	do { \
		uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
				ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
		if (fifo_pipe_state_addr != __end_addr) \
			NV_ERROR(dev, "incomplete pipe init for 0x%x :  %p/%p\n", \
				addr, fifo_pipe_state_addr, __end_addr); \
	} while (0)
#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value

	PIPE_INIT(0x0200);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0200);

	/* 0x6400: zeros followed by a block of float constants
	 * (0x3f800000 = 1.0f, 0x40000000 = 2.0f, 0x3f000000 = 0.5f) */
	PIPE_INIT(0x6400);
	for (i = 0; i < 211; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x40000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x3f000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	PIPE_INIT_END(0x6400);

	PIPE_INIT(0x6800);
	for (i = 0; i < 162; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x3f800000);
	for (i = 0; i < 25; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6800);

	/* 0xbf800000 = -1.0f */
	PIPE_INIT(0x6c00);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0xbf800000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x6c00);

	/* 0x7000: repeating 4-word pattern ending in 0x7149f2ca */
	PIPE_INIT(0x7000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x00000000);
	NV_WRITE_PIPE_INIT(0x7149f2ca);
	for (i = 0; i < 35; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7000);

	PIPE_INIT(0x7400);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7400);

	PIPE_INIT(0x7800);
	for (i = 0; i < 48; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x7800);

	PIPE_INIT(0x4400);
	for (i = 0; i < 32; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x4400);

	PIPE_INIT(0x0000);
	for (i = 0; i < 16; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0000);

	PIPE_INIT(0x0040);
	for (i = 0; i < 4; i++)
		NV_WRITE_PIPE_INIT(0x00000000);
	PIPE_INIT_END(0x0040);

#undef PIPE_INIT
#undef PIPE_INIT_END
#undef NV_WRITE_PIPE_INIT
}
636
637static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
638{
639 int i;
640 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
641 if (nv10_graph_ctx_regs[i] == reg)
642 return i;
643 }
644 NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg);
645 return -1;
646}
647
648static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
649{
650 int i;
651 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
652 if (nv17_graph_ctx_regs[i] == reg)
653 return i;
654 }
655 NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg);
656 return -1;
657}
658
/* Restore the hidden DMA_VTXBUF state of a celsius object by injecting
 * a NV10TCL_DMA_VTXBUF method (0x18c) through the PGRAPH FIFO interface,
 * then putting all the clobbered FIFO/context registers back.
 *
 * Silently returns if no celsius object is bound or 'inst' is zero. */
static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
				       uint32_t inst)
{
	struct drm_device *dev = chan->dev;
	uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
	uint32_t ctx_user, ctx_switch[5];
	int i, subchan = -1;

	/* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
	 * that cannot be restored via MMIO. Do it through the FIFO
	 * instead.
	 */

	/* Look for a celsius object */
	for (i = 0; i < 8; i++) {
		int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;

		/* 0x56/0x96/0x99 are the celsius object classes */
		if (class == 0x56 || class == 0x96 || class == 0x99) {
			subchan = i;
			break;
		}
	}

	if (subchan < 0 || !inst)
		return;

	/* Save the current ctx object */
	ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
	for (i = 0; i < 5; i++)
		ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i));

	/* Save the FIFO state */
	st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
	st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL);
	st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH);
	fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR);

	for (i = 0; i < ARRAY_SIZE(fifo); i++)
		fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i);

	/* Switch to the celsius subchannel */
	for (i = 0; i < 5; i++)
		nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i),
			nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i)));
	nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);

	/* Inject NV10TCL_DMA_VTXBUF */
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2,
		0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
	nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
	/* pulse PGRAPH fifo access to let the method execute */
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);

	/* Restore the FIFO state */
	for (i = 0; i < ARRAY_SIZE(fifo); i++)
		nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]);

	nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);

	/* Restore the current ctx object */
	for (i = 0; i < 5; i++)
		nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
}
728
/* Upload a channel's saved software context (MMIO regs, pipe state and
 * the hidden DMA_VTXBUF state) into PGRAPH and mark the channel as the
 * active one in CTX_USER.  Always returns 0. */
static int
nv10_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
	uint32_t tmp;
	int i;

	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
		nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
	if (dev_priv->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
			nv_wr32(dev, nv17_graph_ctx_regs[i],
				pgraph_ctx->nv17[i]);
	}

	nv10_graph_load_pipe(chan);
	/* DMA_VTXBUF instance is kept in the low 16 bits of GLOBALSTATE1 */
	nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1)
					  & 0xffff));

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	/* stamp this channel's id into the top byte of CTX_USER */
	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
	tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
	return 0;
}
757
/* Save the currently active channel's PGRAPH state into its software
 * context image and mark PGRAPH idle (channel id 31 = "no channel").
 * A no-op (returns 0) when no channel is active. */
static int
nv10_graph_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	struct graph_state *ctx;
	uint32_t tmp;
	int i;

	chan = nv10_graph_channel(dev);
	if (!chan)
		return 0;
	ctx = chan->engctx[NVOBJ_ENGINE_GR];

	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
		ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);

	if (dev_priv->chipset >= 0x17) {
		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
			ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
	}

	nv10_graph_save_pipe(chan);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
	/* channel id 31 in CTX_USER means "no active channel" */
	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= 31 << 24;
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
	return 0;
}
788
/* Handle a PGRAPH context-switch request: save the outgoing channel's
 * state, then load the channel that trapped (taken from TRAPPED_ADDR). */
static void
nv10_graph_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	int chid;

	nouveau_wait_for_idle(dev);

	/* If previous context is valid, we need to save it */
	nv10_graph_unload_context(dev);

	/* Load context for next channel */
	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	chan = dev_priv->channels.ptr[chid];
	/* only switch if the channel has a graph context allocated */
	if (chan && chan->engctx[NVOBJ_ENGINE_GR])
		nv10_graph_load_context(chan);
}
807
/* Store an initial value for 'reg' in the channel's software context
 * image.  The find_offset() helpers return a zero-based index or -1 on
 * failure, so index 0 is valid: the check must be '>= 0', not '> 0'
 * (the old '> 0' silently dropped writes to the first context register,
 * e.g. NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, ...)). */
#define NV_WRITE_CTX(reg, val) do { \
	int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
	if (offset >= 0) \
		pgraph_ctx->nv10[offset] = val; \
	} while (0)

#define NV17_WRITE_CTX(reg, val) do { \
	int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
	if (offset >= 0) \
		pgraph_ctx->nv17[offset] = val; \
	} while (0)
819
820struct nouveau_channel *
821nv10_graph_channel(struct drm_device *dev)
822{
823 struct drm_nouveau_private *dev_priv = dev->dev_private;
824 int chid = 31;
825
826 if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
827 chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
828
829 if (chid >= 31)
830 return NULL;
831
832 return dev_priv->channels.ptr[chid];
833}
834
/* Allocate and initialise a fresh graph context for 'chan'.  The context
 * is a zeroed software image with a handful of non-zero defaults filled
 * in; it is only uploaded to hardware on the next context load.
 * Returns 0 on success or -ENOMEM. */
static int
nv10_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx;

	NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);

	pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
	if (pgraph_ctx == NULL)
		return -ENOMEM;
	chan->engctx[engine] = pgraph_ctx;

	/* non-zero power-on defaults for the context image */
	NV_WRITE_CTX(0x00400e88, 0x08000000);
	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
	NV_WRITE_CTX(0x00400e10, 0x00001000);
	NV_WRITE_CTX(0x00400e14, 0x00001000);
	NV_WRITE_CTX(0x00400e30, 0x00080008);
	NV_WRITE_CTX(0x00400e34, 0x00080008);
	if (dev_priv->chipset >= 0x17) {
		/* is it really needed ??? */
		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
					nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
		NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
	}
	/* pre-stamp this channel's id into the saved CTX_USER value */
	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);

	nv10_graph_create_pipe(chan);
	return 0;
}
871
/* Tear down a channel's graph context.  PGRAPH fifo access is disabled
 * (under the context switch lock) while the context is unloaded, so the
 * hardware cannot switch to it mid-teardown. */
static void
nv10_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct graph_state *pgraph_ctx = chan->engctx[engine];
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);

	/* Unload the context if it's the currently active one */
	if (nv10_graph_channel(dev) == chan)
		nv10_graph_unload_context(dev);

	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the context resources */
	chan->engctx[engine] = NULL;
	kfree(pgraph_ctx);
}
894
/* Program PGRAPH's copy of tiling region 'i' (limit, pitch, address)
 * from the shared tile state in dev_priv. */
static void
nv10_graph_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
}
905
/* Bring PGRAPH out of reset and program its power-on state: debug
 * registers, tiling regions, and an idle context (channel id 31).
 * Always returns 0. */
static int
nv10_graph_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 tmp;
	int i;

	/* pulse PGRAPH through reset via the master enable register */
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

	/* ack any stale interrupts, then enable them all */
	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
				(1<<29) |
				(1<<31));
	if (dev_priv->chipset >= 0x17) {
		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
		nv_wr32(dev, 0x400a10, 0x3ff3fb6);
		nv_wr32(dev, 0x400838, 0x2f8684);
		nv_wr32(dev, 0x40083c, 0x115f3f);
		nv_wr32(dev, 0x004006b0, 0x40000020);
	} else
		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);

	/* Turn all the tiling regions off. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
		nv10_graph_set_tile_region(dev, i);

	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);

	/* channel id 31 in CTX_USER means "no active channel" */
	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
	tmp |= 31 << 24;
	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);

	return 0;
}
957
/* Quiesce PGRAPH: block fifo access, wait for idle, save the active
 * context and mask interrupts.  On suspend, a failure to idle aborts
 * with -EBUSY (fifo access re-enabled); otherwise the fini proceeds. */
static int
nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
	if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
		nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
		return -EBUSY;
	}
	nv10_graph_unload_context(dev);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
	return 0;
}
970
/* Software method handler for celsius (0x99) LMA window methods
 * 0x1638..0x1644.  The four arguments are accumulated in
 * ctx->lma_window[]; only the final method (0x1644) triggers the
 * hardware update, which requires saving and restoring several pipe
 * regions and XFMODE around the window write.  Always returns 0. */
static int
nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
			   u32 class, u32 mthd, u32 data)
{
	struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	struct pipe_state *pipe = &ctx->pipe_state;
	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
	uint32_t xfmode0, xfmode1;
	int i;

	/* stash this argument; methods arrive as 0x1638/163c/1640/1644 */
	ctx->lma_window[(mthd - 0x1638) / 4] = data;

	/* wait for the last argument before touching hardware */
	if (mthd != 0x1644)
		return 0;

	nouveau_wait_for_idle(dev);

	PIPE_SAVE(dev, pipe_0x0040, 0x0040);
	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);

	/* write the accumulated LMA window to pipe address 0x6790 */
	PIPE_RESTORE(dev, ctx->lma_window, 0x6790);

	nouveau_wait_for_idle(dev);

	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);

	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
	PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
	PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
	PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);

	nouveau_wait_for_idle(dev);

	/* same pipe priming sequence as nv10_graph_load_pipe() */
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);

	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);

	nouveau_wait_for_idle(dev);

	PIPE_RESTORE(dev, pipe_0x0040, 0x0040);

	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);

	PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
	PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
	PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nouveau_wait_for_idle(dev);

	return 0;
}
1046
/* Software method handler (0x1658 on class 0x99): enable LMA by setting
 * bit 8 of DEBUG_4 and bit 27 of 0x4006b0.  Always returns 0. */
static int
nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
			   u32 class, u32 mthd, u32 data)
{
	struct drm_device *dev = chan->dev;

	nouveau_wait_for_idle(dev);

	nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
		nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
	nv_wr32(dev, 0x004006b0,
		nv_rd32(dev, 0x004006b0) | 0x8 << 24);

	return 0;
}
1062
/* Human-readable names for PGRAPH interrupt status bits (for logging). */
struct nouveau_bitfield nv10_graph_intr[] = {
	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
	{}
};
1068
/* Human-readable names for PGRAPH NSTATUS bits (for logging). */
struct nouveau_bitfield nv10_graph_nstatus[] = {
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
	{}
};
1076
/* PGRAPH interrupt handler.  Loops while any status bit is set: tries
 * software-method dispatch for ILLEGAL_MTHD errors, performs channel
 * context switches, and (rate-limited) logs whatever remains. */
static void
nv10_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* decode the trapped method: channel/subchannel/method/class */
		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 chid = (addr & 0x01f00000) >> 20;
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
		u32 show = stat;

		if (stat & NV_PGRAPH_INTR_ERROR) {
			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
				/* handled as a software method: don't log it */
				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
					show &= ~NV_PGRAPH_INTR_ERROR;
			}
		}

		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv10_graph_context_switch(dev);
		}

		/* ack everything we saw and re-enable fifo access */
		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

		if (show && nouveau_ratelimit()) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv10_graph_intr, show);
			printk(" nsource:");
			nouveau_bitfield_print(nv04_graph_nsource, nsource);
			printk(" nstatus:");
			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
				     "mthd 0x%04x data 0x%08x\n",
				chid, subc, class, mthd, data);
		}
	}
}
1124
/* Engine destructor: unhook the PGRAPH interrupt (irq line 12) and free
 * the engine object allocated in nv10_graph_create(). */
static void
nv10_graph_destroy(struct drm_device *dev, int engine)
{
	struct nv10_graph_engine *pgraph = nv_engine(dev, engine);

	nouveau_irq_unregister(dev, 12);
	kfree(pgraph);
}
1133
/* Create and register the nv10 PGRAPH engine: fill in the engine vtable,
 * hook irq 12, and register every supported object class.  The celsius
 * 3D class depends on the chipset (0x56/0x96/0x99); on 0x99 the LMA
 * methods are handled in software.  Returns 0 or -ENOMEM. */
int
nv10_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv10_graph_engine *pgraph;

	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
	if (!pgraph)
		return -ENOMEM;

	pgraph->base.destroy = nv10_graph_destroy;
	pgraph->base.init = nv10_graph_init;
	pgraph->base.fini = nv10_graph_fini;
	pgraph->base.context_new = nv10_graph_context_new;
	pgraph->base.context_del = nv10_graph_context_del;
	pgraph->base.object_new = nv04_graph_object_new;
	pgraph->base.set_tile_region = nv10_graph_set_tile_region;

	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	nouveau_irq_register(dev, 12, nv10_graph_isr);

	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */

	/* celcius */
	if (dev_priv->chipset <= 0x10) {
		NVOBJ_CLASS(dev, 0x0056, GR);
	} else
	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
		NVOBJ_CLASS(dev, 0x0096, GR);
	} else {
		NVOBJ_CLASS(dev, 0x0099, GR);
		/* LMA methods are emulated in software on class 0x99 */
		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fifo.c b/drivers/gpu/drm/nouveau/nv17_fifo.c
deleted file mode 100644
index d9e482e4abee..000000000000
--- a/drivers/gpu/drm/nouveau/nv17_fifo.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_util.h"
32#include "nouveau_ramht.h"
33
34static struct ramfc_desc {
35 unsigned bits:6;
36 unsigned ctxs:5;
37 unsigned ctxp:8;
38 unsigned regs:5;
39 unsigned regp;
40} nv17_ramfc[] = {
41 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
42 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
43 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
44 { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
45 { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
46 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
47 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
48 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
49 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
50 { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
51 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
52 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
53 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
54 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
55 {}
56};
57
58struct nv17_fifo_priv {
59 struct nouveau_fifo_priv base;
60 struct ramfc_desc *ramfc_desc;
61};
62
63struct nv17_fifo_chan {
64 struct nouveau_fifo_chan base;
65 struct nouveau_gpuobj *ramfc;
66};
67
68static int
69nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
70{
71 struct drm_device *dev = chan->dev;
72 struct drm_nouveau_private *dev_priv = dev->dev_private;
73 struct nv17_fifo_priv *priv = nv_engine(dev, engine);
74 struct nv17_fifo_chan *fctx;
75 unsigned long flags;
76 int ret;
77
78 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
79 if (!fctx)
80 return -ENOMEM;
81
82 /* map channel control registers */
83 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
84 NV03_USER(chan->id), PAGE_SIZE);
85 if (!chan->user) {
86 ret = -ENOMEM;
87 goto error;
88 }
89
90 /* initialise default fifo context */
91 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
92 chan->id * 64, ~0, 64,
93 NVOBJ_FLAG_ZERO_ALLOC |
94 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
95 if (ret)
96 goto error;
97
98 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
99 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
100 nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
101 nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
102 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
103#ifdef __BIG_ENDIAN
104 NV_PFIFO_CACHE1_BIG_ENDIAN |
105#endif
106 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
107
108 /* enable dma mode on the channel */
109 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
110 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
111 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
112
113error:
114 if (ret)
115 priv->base.base.context_del(chan, engine);
116 return ret;
117}
118
119static int
120nv17_fifo_init(struct drm_device *dev, int engine)
121{
122 struct drm_nouveau_private *dev_priv = dev->dev_private;
123 struct nv17_fifo_priv *priv = nv_engine(dev, engine);
124 int i;
125
126 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
127 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
128
129 nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
130 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
131
132 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
133 ((dev_priv->ramht->bits - 9) << 16) |
134 (dev_priv->ramht->gpuobj->pinst >> 8));
135 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
136 nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
137 dev_priv->ramfc->pinst >> 8);
138
139 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
140
141 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
142 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
143
144 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
145 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
146 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
147
148 for (i = 0; i < priv->base.channels; i++) {
149 if (dev_priv->channels.ptr[i])
150 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
151 }
152
153 return 0;
154}
155
156int
157nv17_fifo_create(struct drm_device *dev)
158{
159 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 struct nv17_fifo_priv *priv;
161
162 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
163 if (!priv)
164 return -ENOMEM;
165
166 priv->base.base.destroy = nv04_fifo_destroy;
167 priv->base.base.init = nv17_fifo_init;
168 priv->base.base.fini = nv04_fifo_fini;
169 priv->base.base.context_new = nv17_fifo_context_new;
170 priv->base.base.context_del = nv04_fifo_context_del;
171 priv->base.channels = 31;
172 priv->ramfc_desc = nv17_ramfc;
173 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
174
175 nouveau_irq_register(dev, 8, nv04_fifo_isr);
176 return 0;
177}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 67be5db021f5..dd85f0f79acf 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -26,18 +26,32 @@
26 26
27#include "drmP.h" 27#include "drmP.h"
28#include "drm_crtc_helper.h" 28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h" 29#include "nouveau_drm.h"
30#include "nouveau_reg.h"
30#include "nouveau_encoder.h" 31#include "nouveau_encoder.h"
31#include "nouveau_connector.h" 32#include "nouveau_connector.h"
32#include "nouveau_crtc.h" 33#include "nouveau_crtc.h"
33#include "nouveau_gpio.h"
34#include "nouveau_hw.h" 34#include "nouveau_hw.h"
35#include "nv17_tv.h" 35#include "nv17_tv.h"
36 36
37#include <core/device.h>
38
39#include <subdev/bios/gpio.h>
40#include <subdev/gpio.h>
41
42MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
43 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
44 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
45 "\t\tDefault: PAL\n"
46 "\t\t*NOTE* Ignored for cards with external TV encoders.");
47static char *nouveau_tv_norm;
48module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
49
37static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) 50static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
38{ 51{
39 struct drm_device *dev = encoder->dev; 52 struct drm_device *dev = encoder->dev;
40 struct drm_nouveau_private *dev_priv = dev->dev_private; 53 struct nouveau_drm *drm = nouveau_drm(dev);
54 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
41 uint32_t testval, regoffset = nv04_dac_output_offset(encoder); 55 uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
42 uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, 56 uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
43 fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; 57 fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -46,15 +60,15 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
46 60
47#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) 61#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
48 testval = RGB_TEST_DATA(0x82, 0xeb, 0x82); 62 testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
49 if (dev_priv->vbios.tvdactestval) 63 if (drm->vbios.tvdactestval)
50 testval = dev_priv->vbios.tvdactestval; 64 testval = drm->vbios.tvdactestval;
51 65
52 dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); 66 dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
53 head = (dacclk & 0x100) >> 8; 67 head = (dacclk & 0x100) >> 8;
54 68
55 /* Save the previous state. */ 69 /* Save the previous state. */
56 gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1); 70 gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
57 gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0); 71 gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
58 fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); 72 fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
59 fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); 73 fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
60 fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); 74 fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -65,8 +79,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
65 ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); 79 ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
66 80
67 /* Prepare the DAC for load detection. */ 81 /* Prepare the DAC for load detection. */
68 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true); 82 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
69 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true); 83 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
70 84
71 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); 85 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
72 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); 86 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -111,8 +125,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
111 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); 125 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
112 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); 126 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
113 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); 127 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
114 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1); 128 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
115 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0); 129 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
116 130
117 return sample; 131 return sample;
118} 132}
@@ -120,15 +134,18 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
120static bool 134static bool
121get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) 135get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
122{ 136{
137 struct nouveau_drm *drm = nouveau_drm(dev);
138 struct nouveau_object *device = drm->device;
139
123 /* Zotac FX5200 */ 140 /* Zotac FX5200 */
124 if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) || 141 if (nv_device_match(device, 0x0322, 0x19da, 0x1035) ||
125 nv_match_device(dev, 0x0322, 0x19da, 0x2035)) { 142 nv_device_match(device, 0x0322, 0x19da, 0x2035)) {
126 *pin_mask = 0xc; 143 *pin_mask = 0xc;
127 return false; 144 return false;
128 } 145 }
129 146
130 /* MSI nForce2 IGP */ 147 /* MSI nForce2 IGP */
131 if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) { 148 if (nv_device_match(device, 0x01f0, 0x1462, 0x5710)) {
132 *pin_mask = 0xc; 149 *pin_mask = 0xc;
133 return false; 150 return false;
134 } 151 }
@@ -140,18 +157,18 @@ static enum drm_connector_status
140nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) 157nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
141{ 158{
142 struct drm_device *dev = encoder->dev; 159 struct drm_device *dev = encoder->dev;
143 struct drm_nouveau_private *dev_priv = dev->dev_private; 160 struct nouveau_drm *drm = nouveau_drm(dev);
144 struct drm_mode_config *conf = &dev->mode_config; 161 struct drm_mode_config *conf = &dev->mode_config;
145 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); 162 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
146 struct dcb_entry *dcb = tv_enc->base.dcb; 163 struct dcb_output *dcb = tv_enc->base.dcb;
147 bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask); 164 bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask);
148 165
149 if (nv04_dac_in_use(encoder)) 166 if (nv04_dac_in_use(encoder))
150 return connector_status_disconnected; 167 return connector_status_disconnected;
151 168
152 if (reliable) { 169 if (reliable) {
153 if (dev_priv->chipset == 0x42 || 170 if (nv_device(drm->device)->chipset == 0x42 ||
154 dev_priv->chipset == 0x43) 171 nv_device(drm->device)->chipset == 0x43)
155 tv_enc->pin_mask = 172 tv_enc->pin_mask =
156 nv42_tv_sample_load(encoder) >> 28 & 0xe; 173 nv42_tv_sample_load(encoder) >> 28 & 0xe;
157 else 174 else
@@ -185,7 +202,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
185 if (!reliable) { 202 if (!reliable) {
186 return connector_status_unknown; 203 return connector_status_unknown;
187 } else if (tv_enc->subconnector) { 204 } else if (tv_enc->subconnector) {
188 NV_INFO(dev, "Load detected on output %c\n", 205 NV_INFO(drm, "Load detected on output %c\n",
189 '@' + ffs(dcb->or)); 206 '@' + ffs(dcb->or));
190 return connector_status_connected; 207 return connector_status_connected;
191 } else { 208 } else {
@@ -357,6 +374,8 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
357static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) 374static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
358{ 375{
359 struct drm_device *dev = encoder->dev; 376 struct drm_device *dev = encoder->dev;
377 struct nouveau_drm *drm = nouveau_drm(dev);
378 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
360 struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; 379 struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
361 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); 380 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
362 381
@@ -364,7 +383,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
364 return; 383 return;
365 nouveau_encoder(encoder)->last_dpms = mode; 384 nouveau_encoder(encoder)->last_dpms = mode;
366 385
367 NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n", 386 NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
368 mode, nouveau_encoder(encoder)->dcb->index); 387 mode, nouveau_encoder(encoder)->dcb->index);
369 388
370 regs->ptv_200 &= ~1; 389 regs->ptv_200 &= ~1;
@@ -381,8 +400,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
381 400
382 nv_load_ptv(dev, regs, 200); 401 nv_load_ptv(dev, regs, 200);
383 402
384 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON); 403 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
385 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON); 404 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
386 405
387 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); 406 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
388} 407}
@@ -390,11 +409,11 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
390static void nv17_tv_prepare(struct drm_encoder *encoder) 409static void nv17_tv_prepare(struct drm_encoder *encoder)
391{ 410{
392 struct drm_device *dev = encoder->dev; 411 struct drm_device *dev = encoder->dev;
393 struct drm_nouveau_private *dev_priv = dev->dev_private; 412 struct nouveau_drm *drm = nouveau_drm(dev);
394 struct drm_encoder_helper_funcs *helper = encoder->helper_private; 413 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
395 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); 414 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
396 int head = nouveau_crtc(encoder->crtc)->index; 415 int head = nouveau_crtc(encoder->crtc)->index;
397 uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[ 416 uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[
398 NV_CIO_CRE_LCD__INDEX]; 417 NV_CIO_CRE_LCD__INDEX];
399 uint32_t dacclk_off = NV_PRAMDAC_DACCLK + 418 uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
400 nv04_dac_output_offset(encoder); 419 nv04_dac_output_offset(encoder);
@@ -410,14 +429,14 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
410 struct drm_encoder *enc; 429 struct drm_encoder *enc;
411 430
412 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { 431 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
413 struct dcb_entry *dcb = nouveau_encoder(enc)->dcb; 432 struct dcb_output *dcb = nouveau_encoder(enc)->dcb;
414 433
415 if ((dcb->type == OUTPUT_TMDS || 434 if ((dcb->type == DCB_OUTPUT_TMDS ||
416 dcb->type == OUTPUT_LVDS) && 435 dcb->type == DCB_OUTPUT_LVDS) &&
417 !enc->crtc && 436 !enc->crtc &&
418 nv04_dfp_get_bound_head(dev, dcb) == head) { 437 nv04_dfp_get_bound_head(dev, dcb) == head) {
419 nv04_dfp_bind_head(dev, dcb, head ^ 1, 438 nv04_dfp_bind_head(dev, dcb, head ^ 1,
420 dev_priv->vbios.fp.dual_link); 439 drm->vbios.fp.dual_link);
421 } 440 }
422 } 441 }
423 442
@@ -429,7 +448,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
429 /* Set the DACCLK register */ 448 /* Set the DACCLK register */
430 dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; 449 dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
431 450
432 if (dev_priv->card_type == NV_40) 451 if (nv_device(drm->device)->card_type == NV_40)
433 dacclk |= 0x1a << 16; 452 dacclk |= 0x1a << 16;
434 453
435 if (tv_norm->kind == CTV_ENC_MODE) { 454 if (tv_norm->kind == CTV_ENC_MODE) {
@@ -453,9 +472,9 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
453 struct drm_display_mode *adjusted_mode) 472 struct drm_display_mode *adjusted_mode)
454{ 473{
455 struct drm_device *dev = encoder->dev; 474 struct drm_device *dev = encoder->dev;
456 struct drm_nouveau_private *dev_priv = dev->dev_private; 475 struct nouveau_drm *drm = nouveau_drm(dev);
457 int head = nouveau_crtc(encoder->crtc)->index; 476 int head = nouveau_crtc(encoder->crtc)->index;
458 struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; 477 struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head];
459 struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state; 478 struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
460 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); 479 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
461 int i; 480 int i;
@@ -486,7 +505,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
486 tv_regs->ptv_614 = 0x13; 505 tv_regs->ptv_614 = 0x13;
487 } 506 }
488 507
489 if (dev_priv->card_type >= NV_30) { 508 if (nv_device(drm->device)->card_type >= NV_30) {
490 tv_regs->ptv_500 = 0xe8e0; 509 tv_regs->ptv_500 = 0xe8e0;
491 tv_regs->ptv_504 = 0x1710; 510 tv_regs->ptv_504 = 0x1710;
492 tv_regs->ptv_604 = 0x0; 511 tv_regs->ptv_604 = 0x0;
@@ -566,7 +585,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
566static void nv17_tv_commit(struct drm_encoder *encoder) 585static void nv17_tv_commit(struct drm_encoder *encoder)
567{ 586{
568 struct drm_device *dev = encoder->dev; 587 struct drm_device *dev = encoder->dev;
569 struct drm_nouveau_private *dev_priv = dev->dev_private; 588 struct nouveau_drm *drm = nouveau_drm(dev);
570 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 589 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
571 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 590 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
572 struct drm_encoder_helper_funcs *helper = encoder->helper_private; 591 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
@@ -581,7 +600,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
581 nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); 600 nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
582 601
583 /* This could use refinement for flatpanels, but it should work */ 602 /* This could use refinement for flatpanels, but it should work */
584 if (dev_priv->chipset < 0x44) 603 if (nv_device(drm->device)->chipset < 0x44)
585 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + 604 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
586 nv04_dac_output_offset(encoder), 605 nv04_dac_output_offset(encoder),
587 0xf0000000); 606 0xf0000000);
@@ -592,7 +611,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
592 611
593 helper->dpms(encoder, DRM_MODE_DPMS_ON); 612 helper->dpms(encoder, DRM_MODE_DPMS_ON);
594 613
595 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", 614 NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
596 drm_get_connector_name( 615 drm_get_connector_name(
597 &nouveau_encoder_connector_get(nv_encoder)->base), 616 &nouveau_encoder_connector_get(nv_encoder)->base),
598 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 617 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
@@ -630,9 +649,10 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
630 struct drm_connector *connector) 649 struct drm_connector *connector)
631{ 650{
632 struct drm_device *dev = encoder->dev; 651 struct drm_device *dev = encoder->dev;
652 struct nouveau_drm *drm = nouveau_drm(dev);
633 struct drm_mode_config *conf = &dev->mode_config; 653 struct drm_mode_config *conf = &dev->mode_config;
634 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); 654 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
635 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 655 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
636 int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS : 656 int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
637 NUM_LD_TV_NORMS; 657 NUM_LD_TV_NORMS;
638 int i; 658 int i;
@@ -646,7 +666,7 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
646 } 666 }
647 667
648 if (i == num_tv_norms) 668 if (i == num_tv_norms)
649 NV_WARN(dev, "Invalid TV norm setting \"%s\"\n", 669 NV_WARN(drm, "Invalid TV norm setting \"%s\"\n",
650 nouveau_tv_norm); 670 nouveau_tv_norm);
651 } 671 }
652 672
@@ -759,8 +779,6 @@ static void nv17_tv_destroy(struct drm_encoder *encoder)
759{ 779{
760 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); 780 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
761 781
762 NV_DEBUG_KMS(encoder->dev, "\n");
763
764 drm_encoder_cleanup(encoder); 782 drm_encoder_cleanup(encoder);
765 kfree(tv_enc); 783 kfree(tv_enc);
766} 784}
@@ -788,7 +806,7 @@ static struct drm_encoder_funcs nv17_tv_funcs = {
788}; 806};
789 807
790int 808int
791nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry) 809nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
792{ 810{
793 struct drm_device *dev = connector->dev; 811 struct drm_device *dev = connector->dev;
794 struct drm_encoder *encoder; 812 struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
index 622e72221682..7b331543a41b 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -130,12 +130,14 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
130static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, 130static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
131 uint32_t val) 131 uint32_t val)
132{ 132{
133 nv_wr32(dev, reg, val); 133 struct nouveau_device *device = nouveau_dev(dev);
134 nv_wr32(device, reg, val);
134} 135}
135 136
136static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) 137static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
137{ 138{
138 return nv_rd32(dev, reg); 139 struct nouveau_device *device = nouveau_dev(dev);
140 return nv_rd32(device, reg);
139} 141}
140 142
141static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, 143static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
index 4d1d29f60307..a4c4b0c2c7ce 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -26,7 +26,7 @@
26 26
27#include "drmP.h" 27#include "drmP.h"
28#include "drm_crtc_helper.h" 28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h" 29#include "nouveau_drm.h"
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32#include "nouveau_hw.h" 32#include "nouveau_hw.h"
@@ -543,10 +543,9 @@ void nv17_tv_update_rescaler(struct drm_encoder *encoder)
543void nv17_ctv_update_rescaler(struct drm_encoder *encoder) 543void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
544{ 544{
545 struct drm_device *dev = encoder->dev; 545 struct drm_device *dev = encoder->dev;
546 struct drm_nouveau_private *dev_priv = dev->dev_private;
547 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); 546 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
548 int head = nouveau_crtc(encoder->crtc)->index; 547 int head = nouveau_crtc(encoder->crtc)->index;
549 struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; 548 struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head];
550 struct drm_display_mode *crtc_mode = &encoder->crtc->mode; 549 struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
551 struct drm_display_mode *output_mode = 550 struct drm_display_mode *output_mode =
552 &get_tv_norm(encoder)->ctv_enc_mode.mode; 551 &get_tv_norm(encoder)->ctv_enc_mode.mode;
diff --git a/drivers/gpu/drm/nouveau/nv20_fb.c b/drivers/gpu/drm/nouveau/nv20_fb.c
deleted file mode 100644
index 19bd64059a66..000000000000
--- a/drivers/gpu/drm/nouveau/nv20_fb.c
+++ /dev/null
@@ -1,148 +0,0 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6static struct drm_mm_node *
7nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
11 struct drm_mm_node *mem;
12 int ret;
13
14 ret = drm_mm_pre_get(&pfb->tag_heap);
15 if (ret)
16 return NULL;
17
18 spin_lock(&dev_priv->tile.lock);
19 mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
20 if (mem)
21 mem = drm_mm_get_block_atomic(mem, size, 0);
22 spin_unlock(&dev_priv->tile.lock);
23
24 return mem;
25}
26
27static void
28nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node **pmem)
29{
30 struct drm_nouveau_private *dev_priv = dev->dev_private;
31 struct drm_mm_node *mem = *pmem;
32 if (mem) {
33 spin_lock(&dev_priv->tile.lock);
34 drm_mm_put_block(mem);
35 spin_unlock(&dev_priv->tile.lock);
36 *pmem = NULL;
37 }
38}
39
40void
41nv20_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
42 uint32_t size, uint32_t pitch, uint32_t flags)
43{
44 struct drm_nouveau_private *dev_priv = dev->dev_private;
45 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
46 int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
47
48 tile->addr = 0x00000001 | addr;
49 tile->limit = max(1u, addr + size) - 1;
50 tile->pitch = pitch;
51
52 /* Allocate some of the on-die tag memory, used to store Z
53 * compression meta-data (most likely just a bitmap determining
54 * if a given tile is compressed or not).
55 */
56 if (flags & NOUVEAU_GEM_TILE_ZETA) {
57 tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
58 if (tile->tag_mem) {
59 /* Enable Z compression */
60 tile->zcomp = tile->tag_mem->start;
61 if (dev_priv->chipset >= 0x25) {
62 if (bpp == 16)
63 tile->zcomp |= NV25_PFB_ZCOMP_MODE_16;
64 else
65 tile->zcomp |= NV25_PFB_ZCOMP_MODE_32;
66 } else {
67 tile->zcomp |= NV20_PFB_ZCOMP_EN;
68 if (bpp != 16)
69 tile->zcomp |= NV20_PFB_ZCOMP_MODE_32;
70 }
71 }
72
73 tile->addr |= 2;
74 }
75}
76
77void
78nv20_fb_free_tile_region(struct drm_device *dev, int i)
79{
80 struct drm_nouveau_private *dev_priv = dev->dev_private;
81 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
82
83 tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
84 nv20_fb_free_tag(dev, &tile->tag_mem);
85}
86
87void
88nv20_fb_set_tile_region(struct drm_device *dev, int i)
89{
90 struct drm_nouveau_private *dev_priv = dev->dev_private;
91 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
92
93 nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
94 nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
95 nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
96 nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
97}
98
99int
100nv20_fb_vram_init(struct drm_device *dev)
101{
102 struct drm_nouveau_private *dev_priv = dev->dev_private;
103 u32 mem_size = nv_rd32(dev, 0x10020c);
104 u32 pbus1218 = nv_rd32(dev, 0x001218);
105
106 dev_priv->vram_size = mem_size & 0xff000000;
107 switch (pbus1218 & 0x00000300) {
108 case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
109 case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
110 case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
111 case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_GDDR2; break;
112 }
113
114 return 0;
115}
116
117int
118nv20_fb_init(struct drm_device *dev)
119{
120 struct drm_nouveau_private *dev_priv = dev->dev_private;
121 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
122 int i;
123
124 if (dev_priv->chipset >= 0x25)
125 drm_mm_init(&pfb->tag_heap, 0, 64 * 1024);
126 else
127 drm_mm_init(&pfb->tag_heap, 0, 32 * 1024);
128
129 /* Turn all the tiling regions off. */
130 pfb->num_tiles = NV10_PFB_TILE__SIZE;
131 for (i = 0; i < pfb->num_tiles; i++)
132 pfb->set_tile_region(dev, i);
133
134 return 0;
135}
136
137void
138nv20_fb_takedown(struct drm_device *dev)
139{
140 struct drm_nouveau_private *dev_priv = dev->dev_private;
141 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
142 int i;
143
144 for (i = 0; i < pfb->num_tiles; i++)
145 pfb->free_tile_region(dev, i);
146
147 drm_mm_takedown(&pfb->tag_heap);
148}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
deleted file mode 100644
index e34ea30758f6..000000000000
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ /dev/null
@@ -1,836 +0,0 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6/*
7 * NV20
8 * -----
9 * There are 3 families :
10 * NV20 is 0x10de:0x020*
11 * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
12 * NV2A is 0x10de:0x02A0
13 *
14 * NV30
15 * -----
16 * There are 3 families :
17 * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
18 * NV34 is 0x10de:0x032*
19 * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
20 *
21 * Not seen in the wild, no dumps (probably NV35) :
22 * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
23 * NV38 is 0x10de:0x0333, 0x10de:0x00fe
24 *
25 */
26
27struct nv20_graph_engine {
28 struct nouveau_exec_engine base;
29 struct nouveau_gpuobj *ctxtab;
30 void (*grctx_init)(struct nouveau_gpuobj *);
31 u32 grctx_size;
32 u32 grctx_user;
33};
34
35#define NV20_GRCTX_SIZE (3580*4)
36#define NV25_GRCTX_SIZE (3529*4)
37#define NV2A_GRCTX_SIZE (3500*4)
38
39#define NV30_31_GRCTX_SIZE (24392)
40#define NV34_GRCTX_SIZE (18140)
41#define NV35_36_GRCTX_SIZE (22396)
42
43int
44nv20_graph_unload_context(struct drm_device *dev)
45{
46 struct nouveau_channel *chan;
47 struct nouveau_gpuobj *grctx;
48 u32 tmp;
49
50 chan = nv10_graph_channel(dev);
51 if (!chan)
52 return 0;
53 grctx = chan->engctx[NVOBJ_ENGINE_GR];
54
55 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4);
56 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
57 NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
58
59 nouveau_wait_for_idle(dev);
60
61 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
62 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
63 tmp |= 31 << 24;
64 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
65 return 0;
66}
67
68static void
69nv20_graph_rdi(struct drm_device *dev)
70{
71 struct drm_nouveau_private *dev_priv = dev->dev_private;
72 int i, writecount = 32;
73 uint32_t rdi_index = 0x2c80000;
74
75 if (dev_priv->chipset == 0x20) {
76 rdi_index = 0x3d0000;
77 writecount = 15;
78 }
79
80 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
81 for (i = 0; i < writecount; i++)
82 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
83
84 nouveau_wait_for_idle(dev);
85}
86
87static void
88nv20_graph_context_init(struct nouveau_gpuobj *ctx)
89{
90 int i;
91
92 nv_wo32(ctx, 0x033c, 0xffff0000);
93 nv_wo32(ctx, 0x03a0, 0x0fff0000);
94 nv_wo32(ctx, 0x03a4, 0x0fff0000);
95 nv_wo32(ctx, 0x047c, 0x00000101);
96 nv_wo32(ctx, 0x0490, 0x00000111);
97 nv_wo32(ctx, 0x04a8, 0x44400000);
98 for (i = 0x04d4; i <= 0x04e0; i += 4)
99 nv_wo32(ctx, i, 0x00030303);
100 for (i = 0x04f4; i <= 0x0500; i += 4)
101 nv_wo32(ctx, i, 0x00080000);
102 for (i = 0x050c; i <= 0x0518; i += 4)
103 nv_wo32(ctx, i, 0x01012000);
104 for (i = 0x051c; i <= 0x0528; i += 4)
105 nv_wo32(ctx, i, 0x000105b8);
106 for (i = 0x052c; i <= 0x0538; i += 4)
107 nv_wo32(ctx, i, 0x00080008);
108 for (i = 0x055c; i <= 0x0598; i += 4)
109 nv_wo32(ctx, i, 0x07ff0000);
110 nv_wo32(ctx, 0x05a4, 0x4b7fffff);
111 nv_wo32(ctx, 0x05fc, 0x00000001);
112 nv_wo32(ctx, 0x0604, 0x00004000);
113 nv_wo32(ctx, 0x0610, 0x00000001);
114 nv_wo32(ctx, 0x0618, 0x00040000);
115 nv_wo32(ctx, 0x061c, 0x00010000);
116 for (i = 0x1c1c; i <= 0x248c; i += 16) {
117 nv_wo32(ctx, (i + 0), 0x10700ff9);
118 nv_wo32(ctx, (i + 4), 0x0436086c);
119 nv_wo32(ctx, (i + 8), 0x000c001b);
120 }
121 nv_wo32(ctx, 0x281c, 0x3f800000);
122 nv_wo32(ctx, 0x2830, 0x3f800000);
123 nv_wo32(ctx, 0x285c, 0x40000000);
124 nv_wo32(ctx, 0x2860, 0x3f800000);
125 nv_wo32(ctx, 0x2864, 0x3f000000);
126 nv_wo32(ctx, 0x286c, 0x40000000);
127 nv_wo32(ctx, 0x2870, 0x3f800000);
128 nv_wo32(ctx, 0x2878, 0xbf800000);
129 nv_wo32(ctx, 0x2880, 0xbf800000);
130 nv_wo32(ctx, 0x34a4, 0x000fe000);
131 nv_wo32(ctx, 0x3530, 0x000003f8);
132 nv_wo32(ctx, 0x3540, 0x002fe000);
133 for (i = 0x355c; i <= 0x3578; i += 4)
134 nv_wo32(ctx, i, 0x001c527c);
135}
136
137static void
138nv25_graph_context_init(struct nouveau_gpuobj *ctx)
139{
140 int i;
141
142 nv_wo32(ctx, 0x035c, 0xffff0000);
143 nv_wo32(ctx, 0x03c0, 0x0fff0000);
144 nv_wo32(ctx, 0x03c4, 0x0fff0000);
145 nv_wo32(ctx, 0x049c, 0x00000101);
146 nv_wo32(ctx, 0x04b0, 0x00000111);
147 nv_wo32(ctx, 0x04c8, 0x00000080);
148 nv_wo32(ctx, 0x04cc, 0xffff0000);
149 nv_wo32(ctx, 0x04d0, 0x00000001);
150 nv_wo32(ctx, 0x04e4, 0x44400000);
151 nv_wo32(ctx, 0x04fc, 0x4b800000);
152 for (i = 0x0510; i <= 0x051c; i += 4)
153 nv_wo32(ctx, i, 0x00030303);
154 for (i = 0x0530; i <= 0x053c; i += 4)
155 nv_wo32(ctx, i, 0x00080000);
156 for (i = 0x0548; i <= 0x0554; i += 4)
157 nv_wo32(ctx, i, 0x01012000);
158 for (i = 0x0558; i <= 0x0564; i += 4)
159 nv_wo32(ctx, i, 0x000105b8);
160 for (i = 0x0568; i <= 0x0574; i += 4)
161 nv_wo32(ctx, i, 0x00080008);
162 for (i = 0x0598; i <= 0x05d4; i += 4)
163 nv_wo32(ctx, i, 0x07ff0000);
164 nv_wo32(ctx, 0x05e0, 0x4b7fffff);
165 nv_wo32(ctx, 0x0620, 0x00000080);
166 nv_wo32(ctx, 0x0624, 0x30201000);
167 nv_wo32(ctx, 0x0628, 0x70605040);
168 nv_wo32(ctx, 0x062c, 0xb0a09080);
169 nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
170 nv_wo32(ctx, 0x0664, 0x00000001);
171 nv_wo32(ctx, 0x066c, 0x00004000);
172 nv_wo32(ctx, 0x0678, 0x00000001);
173 nv_wo32(ctx, 0x0680, 0x00040000);
174 nv_wo32(ctx, 0x0684, 0x00010000);
175 for (i = 0x1b04; i <= 0x2374; i += 16) {
176 nv_wo32(ctx, (i + 0), 0x10700ff9);
177 nv_wo32(ctx, (i + 4), 0x0436086c);
178 nv_wo32(ctx, (i + 8), 0x000c001b);
179 }
180 nv_wo32(ctx, 0x2704, 0x3f800000);
181 nv_wo32(ctx, 0x2718, 0x3f800000);
182 nv_wo32(ctx, 0x2744, 0x40000000);
183 nv_wo32(ctx, 0x2748, 0x3f800000);
184 nv_wo32(ctx, 0x274c, 0x3f000000);
185 nv_wo32(ctx, 0x2754, 0x40000000);
186 nv_wo32(ctx, 0x2758, 0x3f800000);
187 nv_wo32(ctx, 0x2760, 0xbf800000);
188 nv_wo32(ctx, 0x2768, 0xbf800000);
189 nv_wo32(ctx, 0x308c, 0x000fe000);
190 nv_wo32(ctx, 0x3108, 0x000003f8);
191 nv_wo32(ctx, 0x3468, 0x002fe000);
192 for (i = 0x3484; i <= 0x34a0; i += 4)
193 nv_wo32(ctx, i, 0x001c527c);
194}
195
196static void
197nv2a_graph_context_init(struct nouveau_gpuobj *ctx)
198{
199 int i;
200
201 nv_wo32(ctx, 0x033c, 0xffff0000);
202 nv_wo32(ctx, 0x03a0, 0x0fff0000);
203 nv_wo32(ctx, 0x03a4, 0x0fff0000);
204 nv_wo32(ctx, 0x047c, 0x00000101);
205 nv_wo32(ctx, 0x0490, 0x00000111);
206 nv_wo32(ctx, 0x04a8, 0x44400000);
207 for (i = 0x04d4; i <= 0x04e0; i += 4)
208 nv_wo32(ctx, i, 0x00030303);
209 for (i = 0x04f4; i <= 0x0500; i += 4)
210 nv_wo32(ctx, i, 0x00080000);
211 for (i = 0x050c; i <= 0x0518; i += 4)
212 nv_wo32(ctx, i, 0x01012000);
213 for (i = 0x051c; i <= 0x0528; i += 4)
214 nv_wo32(ctx, i, 0x000105b8);
215 for (i = 0x052c; i <= 0x0538; i += 4)
216 nv_wo32(ctx, i, 0x00080008);
217 for (i = 0x055c; i <= 0x0598; i += 4)
218 nv_wo32(ctx, i, 0x07ff0000);
219 nv_wo32(ctx, 0x05a4, 0x4b7fffff);
220 nv_wo32(ctx, 0x05fc, 0x00000001);
221 nv_wo32(ctx, 0x0604, 0x00004000);
222 nv_wo32(ctx, 0x0610, 0x00000001);
223 nv_wo32(ctx, 0x0618, 0x00040000);
224 nv_wo32(ctx, 0x061c, 0x00010000);
225 for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
226 nv_wo32(ctx, (i + 0), 0x10700ff9);
227 nv_wo32(ctx, (i + 4), 0x0436086c);
228 nv_wo32(ctx, (i + 8), 0x000c001b);
229 }
230 nv_wo32(ctx, 0x269c, 0x3f800000);
231 nv_wo32(ctx, 0x26b0, 0x3f800000);
232 nv_wo32(ctx, 0x26dc, 0x40000000);
233 nv_wo32(ctx, 0x26e0, 0x3f800000);
234 nv_wo32(ctx, 0x26e4, 0x3f000000);
235 nv_wo32(ctx, 0x26ec, 0x40000000);
236 nv_wo32(ctx, 0x26f0, 0x3f800000);
237 nv_wo32(ctx, 0x26f8, 0xbf800000);
238 nv_wo32(ctx, 0x2700, 0xbf800000);
239 nv_wo32(ctx, 0x3024, 0x000fe000);
240 nv_wo32(ctx, 0x30a0, 0x000003f8);
241 nv_wo32(ctx, 0x33fc, 0x002fe000);
242 for (i = 0x341c; i <= 0x3438; i += 4)
243 nv_wo32(ctx, i, 0x001c527c);
244}
245
/* Fill 'ctx' with the default PGRAPH context image for NV30/NV31
 * chipsets.  Offsets are byte offsets into the context object.
 */
static void
nv30_31_graph_context_init(struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(ctx, 0x0410, 0x00000101);
	nv_wo32(ctx, 0x0424, 0x00000111);
	nv_wo32(ctx, 0x0428, 0x00000060);
	nv_wo32(ctx, 0x0444, 0x00000080);
	nv_wo32(ctx, 0x0448, 0xffff0000);
	nv_wo32(ctx, 0x044c, 0x00000001);
	nv_wo32(ctx, 0x0460, 0x44400000);
	nv_wo32(ctx, 0x048c, 0xffff0000);
	for (i = 0x04e0; i < 0x04e8; i += 4)
		nv_wo32(ctx, i, 0x0fff0000);
	nv_wo32(ctx, 0x04ec, 0x00011100);
	for (i = 0x0508; i < 0x0548; i += 4)
		nv_wo32(ctx, i, 0x07ff0000);
	nv_wo32(ctx, 0x0550, 0x4b7fffff);
	nv_wo32(ctx, 0x058c, 0x00000080);
	nv_wo32(ctx, 0x0590, 0x30201000);
	nv_wo32(ctx, 0x0594, 0x70605040);
	nv_wo32(ctx, 0x0598, 0xb8a89888);
	nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
	nv_wo32(ctx, 0x05b0, 0xb0000000);
	for (i = 0x0600; i < 0x0640; i += 4)
		nv_wo32(ctx, i, 0x00010588);
	for (i = 0x0640; i < 0x0680; i += 4)
		nv_wo32(ctx, i, 0x00030303);
	for (i = 0x06c0; i < 0x0700; i += 4)
		nv_wo32(ctx, i, 0x0008aae4);
	for (i = 0x0700; i < 0x0740; i += 4)
		nv_wo32(ctx, i, 0x01012000);
	for (i = 0x0740; i < 0x0780; i += 4)
		nv_wo32(ctx, i, 0x00080008);
	nv_wo32(ctx, 0x085c, 0x00040000);
	nv_wo32(ctx, 0x0860, 0x00010000);
	for (i = 0x0864; i < 0x0874; i += 4)
		nv_wo32(ctx, i, 0x00040004);
	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
		/* Three 32-bit words per 16-byte record.  The previous
		 * code used byte offsets i + 1 / i + 2, which produced
		 * overlapping misaligned writes instead of filling the
		 * second and third words; every other *_context_init in
		 * this file (e.g. nv2a, nv35_36) uses i + 4 / i + 8 with
		 * the same three values.
		 */
		nv_wo32(ctx, i + 0, 0x10700ff9);
		nv_wo32(ctx, i + 4, 0x0436086c);
		nv_wo32(ctx, i + 8, 0x000c001b);
	}
	for (i = 0x30b8; i < 0x30c8; i += 4)
		nv_wo32(ctx, i, 0x0000ffff);
	/* IEEE-754 float constants (1.0f, 2.0f, 0.5f, -1.0f) as raw bits. */
	nv_wo32(ctx, 0x344c, 0x3f800000);
	nv_wo32(ctx, 0x3808, 0x3f800000);
	nv_wo32(ctx, 0x381c, 0x3f800000);
	nv_wo32(ctx, 0x3848, 0x40000000);
	nv_wo32(ctx, 0x384c, 0x3f800000);
	nv_wo32(ctx, 0x3850, 0x3f000000);
	nv_wo32(ctx, 0x3858, 0x40000000);
	nv_wo32(ctx, 0x385c, 0x3f800000);
	nv_wo32(ctx, 0x3864, 0xbf800000);
	nv_wo32(ctx, 0x386c, 0xbf800000);
}
303
/* Fill 'ctx' with the default PGRAPH context image for NV34 chipsets.
 * Offsets are byte offsets into the context object.
 */
static void
nv34_graph_context_init(struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(ctx, 0x040c, 0x01000101);
	nv_wo32(ctx, 0x0420, 0x00000111);
	nv_wo32(ctx, 0x0424, 0x00000060);
	nv_wo32(ctx, 0x0440, 0x00000080);
	nv_wo32(ctx, 0x0444, 0xffff0000);
	nv_wo32(ctx, 0x0448, 0x00000001);
	nv_wo32(ctx, 0x045c, 0x44400000);
	nv_wo32(ctx, 0x0480, 0xffff0000);
	for (i = 0x04d4; i < 0x04dc; i += 4)
		nv_wo32(ctx, i, 0x0fff0000);
	nv_wo32(ctx, 0x04e0, 0x00011100);
	for (i = 0x04fc; i < 0x053c; i += 4)
		nv_wo32(ctx, i, 0x07ff0000);
	nv_wo32(ctx, 0x0544, 0x4b7fffff);
	nv_wo32(ctx, 0x057c, 0x00000080);
	nv_wo32(ctx, 0x0580, 0x30201000);
	nv_wo32(ctx, 0x0584, 0x70605040);
	nv_wo32(ctx, 0x0588, 0xb8a89888);
	nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
	nv_wo32(ctx, 0x05a0, 0xb0000000);
	for (i = 0x05f0; i < 0x0630; i += 4)
		nv_wo32(ctx, i, 0x00010588);
	for (i = 0x0630; i < 0x0670; i += 4)
		nv_wo32(ctx, i, 0x00030303);
	for (i = 0x06b0; i < 0x06f0; i += 4)
		nv_wo32(ctx, i, 0x0008aae4);
	for (i = 0x06f0; i < 0x0730; i += 4)
		nv_wo32(ctx, i, 0x01012000);
	for (i = 0x0730; i < 0x0770; i += 4)
		nv_wo32(ctx, i, 0x00080008);
	nv_wo32(ctx, 0x0850, 0x00040000);
	nv_wo32(ctx, 0x0854, 0x00010000);
	for (i = 0x0858; i < 0x0868; i += 4)
		nv_wo32(ctx, i, 0x00040004);
	for (i = 0x15ac; i <= 0x271c ; i += 16) {
		/* Three 32-bit words per 16-byte record.  The previous
		 * code used byte offsets i + 1 / i + 2, which produced
		 * overlapping misaligned writes instead of filling the
		 * second and third words; every other *_context_init in
		 * this file (e.g. nv2a, nv35_36) uses i + 4 / i + 8 with
		 * the same three values.
		 */
		nv_wo32(ctx, i + 0, 0x10700ff9);
		nv_wo32(ctx, i + 4, 0x0436086c);
		nv_wo32(ctx, i + 8, 0x000c001b);
	}
	for (i = 0x274c; i < 0x275c; i += 4)
		nv_wo32(ctx, i, 0x0000ffff);
	/* IEEE-754 float constants (1.0f, 2.0f, 0.5f, -1.0f) as raw bits. */
	nv_wo32(ctx, 0x2ae0, 0x3f800000);
	nv_wo32(ctx, 0x2e9c, 0x3f800000);
	nv_wo32(ctx, 0x2eb0, 0x3f800000);
	nv_wo32(ctx, 0x2edc, 0x40000000);
	nv_wo32(ctx, 0x2ee0, 0x3f800000);
	nv_wo32(ctx, 0x2ee4, 0x3f000000);
	nv_wo32(ctx, 0x2eec, 0x40000000);
	nv_wo32(ctx, 0x2ef0, 0x3f800000);
	nv_wo32(ctx, 0x2ef8, 0xbf800000);
	nv_wo32(ctx, 0x2f00, 0xbf800000);
}
361
/* Fill 'ctx' with the default PGRAPH context image for NV35/NV36
 * chipsets.  Offsets are byte offsets into the context object.
 */
static void
nv35_36_graph_context_init(struct nouveau_gpuobj *ctx)
{
	int i;

	nv_wo32(ctx, 0x040c, 0x00000101);
	nv_wo32(ctx, 0x0420, 0x00000111);
	nv_wo32(ctx, 0x0424, 0x00000060);
	nv_wo32(ctx, 0x0440, 0x00000080);
	nv_wo32(ctx, 0x0444, 0xffff0000);
	nv_wo32(ctx, 0x0448, 0x00000001);
	nv_wo32(ctx, 0x045c, 0x44400000);
	nv_wo32(ctx, 0x0488, 0xffff0000);
	for (i = 0x04dc; i < 0x04e4; i += 4)
		nv_wo32(ctx, i, 0x0fff0000);
	nv_wo32(ctx, 0x04e8, 0x00011100);
	for (i = 0x0504; i < 0x0544; i += 4)
		nv_wo32(ctx, i, 0x07ff0000);
	nv_wo32(ctx, 0x054c, 0x4b7fffff);
	nv_wo32(ctx, 0x0588, 0x00000080);
	nv_wo32(ctx, 0x058c, 0x30201000);
	nv_wo32(ctx, 0x0590, 0x70605040);
	nv_wo32(ctx, 0x0594, 0xb8a89888);
	nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
	nv_wo32(ctx, 0x05ac, 0xb0000000);
	for (i = 0x0604; i < 0x0644; i += 4)
		nv_wo32(ctx, i, 0x00010588);
	for (i = 0x0644; i < 0x0684; i += 4)
		nv_wo32(ctx, i, 0x00030303);
	for (i = 0x06c4; i < 0x0704; i += 4)
		nv_wo32(ctx, i, 0x0008aae4);
	for (i = 0x0704; i < 0x0744; i += 4)
		nv_wo32(ctx, i, 0x01012000);
	for (i = 0x0744; i < 0x0784; i += 4)
		nv_wo32(ctx, i, 0x00080008);
	nv_wo32(ctx, 0x0860, 0x00040000);
	nv_wo32(ctx, 0x0864, 0x00010000);
	for (i = 0x0868; i < 0x0878; i += 4)
		nv_wo32(ctx, i, 0x00040004);
	/* Three 32-bit words per 16-byte record. */
	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
		nv_wo32(ctx, i + 0, 0x10700ff9);
		nv_wo32(ctx, i + 4, 0x0436086c);
		nv_wo32(ctx, i + 8, 0x000c001b);
	}
	for (i = 0x30bc; i < 0x30cc; i += 4)
		nv_wo32(ctx, i, 0x0000ffff);
	/* IEEE-754 float constants (1.0f, 2.0f, 0.5f, -1.0f) as raw bits. */
	nv_wo32(ctx, 0x3450, 0x3f800000);
	nv_wo32(ctx, 0x380c, 0x3f800000);
	nv_wo32(ctx, 0x3820, 0x3f800000);
	nv_wo32(ctx, 0x384c, 0x40000000);
	nv_wo32(ctx, 0x3850, 0x3f800000);
	nv_wo32(ctx, 0x3854, 0x3f000000);
	nv_wo32(ctx, 0x385c, 0x40000000);
	nv_wo32(ctx, 0x3860, 0x3f800000);
	nv_wo32(ctx, 0x3868, 0xbf800000);
	nv_wo32(ctx, 0x3870, 0xbf800000);
}
419
420int
421nv20_graph_context_new(struct nouveau_channel *chan, int engine)
422{
423 struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
424 struct nouveau_gpuobj *grctx = NULL;
425 struct drm_device *dev = chan->dev;
426 int ret;
427
428 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
429 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
430 if (ret)
431 return ret;
432
433 /* Initialise default context values */
434 pgraph->grctx_init(grctx);
435
436 /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
437 /* CTX_USER */
438 nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
439
440 nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4);
441 chan->engctx[engine] = grctx;
442 return 0;
443}
444
445void
446nv20_graph_context_del(struct nouveau_channel *chan, int engine)
447{
448 struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
449 struct nouveau_gpuobj *grctx = chan->engctx[engine];
450 struct drm_device *dev = chan->dev;
451 struct drm_nouveau_private *dev_priv = dev->dev_private;
452 unsigned long flags;
453
454 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
455 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
456
457 /* Unload the context if it's the currently active one */
458 if (nv10_graph_channel(dev) == chan)
459 nv20_graph_unload_context(dev);
460
461 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
462 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
463
464 /* Free the context resources */
465 nv_wo32(pgraph->ctxtab, chan->id * 4, 0);
466
467 nouveau_gpuobj_ref(NULL, &grctx);
468 chan->engctx[engine] = NULL;
469}
470
/* Program tiling region 'i' into PGRAPH, both through the direct tile
 * registers and through the RDI port (each RDI access is an index write
 * followed by a data write, so those pairs must stay adjacent).
 */
static void
nv20_graph_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
	nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);

	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);

	/* Depth compression state only exists on NV20-family cards. */
	if (dev_priv->card_type == NV_20) {
		nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
	}
}
494
/* Bring up PGRAPH on NV2x hardware: reset the engine, load the channel
 * context table pointer, program debug/tuning registers and mirror the
 * framebuffer configuration into PGRAPH.  Register write order follows
 * the hardware bring-up sequence and should not be rearranged.
 */
int
nv20_graph_init(struct drm_device *dev, int engine)
{
	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t tmp, vramsz;
	int i;

	/* Pulse PGRAPH through PMC reset. */
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);

	nv20_graph_rdi(dev);

	/* Ack any stale interrupts, then enable them all. */
	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
	nv_wr32(dev, 0x40009C           , 0x00000040);

	/* NV25+ and older NV20 use different magic tuning values. */
	if (dev_priv->chipset >= 0x25) {
		nv_wr32(dev, 0x400890, 0x00a8cfff);
		nv_wr32(dev, 0x400610, 0x304B1FB6);
		nv_wr32(dev, 0x400B80, 0x1cbd3883);
		nv_wr32(dev, 0x400B84, 0x44000000);
		nv_wr32(dev, 0x400098, 0x40000080);
		nv_wr32(dev, 0x400B88, 0x000000ff);

	} else {
		nv_wr32(dev, 0x400880, 0x0008c7df);
		nv_wr32(dev, 0x400094, 0x00000005);
		nv_wr32(dev, 0x400B80, 0x45eae20e);
		nv_wr32(dev, 0x400B84, 0x24000000);
		nv_wr32(dev, 0x400098, 0x00000040);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
		nv20_graph_set_tile_region(dev, i);

	nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);

	/* begin RAM config */
	vramsz = pci_resource_len(dev->pdev, 0) - 1;
	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
	nv_wr32(dev, 0x400820, 0);
	nv_wr32(dev, 0x400824, 0);
	nv_wr32(dev, 0x400864, vramsz - 1);
	nv_wr32(dev, 0x400868, vramsz - 1);

	/* interesting.. the below overwrites some of the tile setup above.. */
	nv_wr32(dev, 0x400B20, 0x00000000);
	nv_wr32(dev, 0x400B04, 0xFFFFFFFF);

	/* Open up the user clip rectangle to the full coordinate range. */
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);

	return 0;
}
582
/* Bring up PGRAPH on NV3x hardware.  Structure mirrors nv20_graph_init
 * but with NV30-specific register values; write order follows the
 * hardware bring-up sequence and should not be rearranged.
 */
int
nv30_graph_init(struct drm_device *dev, int engine)
{
	struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	/* Pulse PGRAPH through PMC reset. */
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE,
		nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);

	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);

	/* Ack any stale interrupts, then enable them all. */
	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(dev, 0x400890, 0x01b463ff);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
	nv_wr32(dev, 0x400B80, 0x1003d888);
	nv_wr32(dev, 0x400B84, 0x0c000000);
	nv_wr32(dev, 0x400098, 0x00000000);
	nv_wr32(dev, 0x40009C, 0x0005ad00);
	nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
	nv_wr32(dev, 0x4000a0, 0x00000000);
	nv_wr32(dev, 0x4000a4, 0x00000008);
	nv_wr32(dev, 0x4008a8, 0xb784a400);
	nv_wr32(dev, 0x400ba0, 0x002f8685);
	nv_wr32(dev, 0x400ba4, 0x00231f3f);
	nv_wr32(dev, 0x4008a4, 0x40000020);

	/* NV34-only RDI pokes (index write followed by data write). */
	if (dev_priv->chipset == 0x34) {
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
	}

	nv_wr32(dev, 0x4000c0, 0x00000016);

	/* Turn all the tiling regions off. */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
		nv20_graph_set_tile_region(dev, i);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
	nv_wr32(dev, 0x0040075c             , 0x00000001);

	/* begin RAM config */
	/* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
	if (dev_priv->chipset != 0x34) {
		nv_wr32(dev, 0x400750, 0x00EA0000);
		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x400750, 0x00EA0004);
		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
	}

	return 0;
}
653
654int
655nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
656{
657 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
658 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
659 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
660 return -EBUSY;
661 }
662 nv20_graph_unload_context(dev);
663 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
664 return 0;
665}
666
/* PGRAPH interrupt handler: drain the interrupt status register,
 * dispatching software methods for ILLEGAL_MTHD errors and logging
 * (rate-limited) anything left unhandled.
 */
static void
nv20_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		/* Decode channel/subchannel/method from the trapped address. */
		u32 chid = (addr & 0x01f00000) >> 20;
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
		u32 show = stat;

		if (stat & NV_PGRAPH_INTR_ERROR) {
			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
				/* Suppress logging if a registered software
				 * method handled the call. */
				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
					show &= ~NV_PGRAPH_INTR_ERROR;
			}
		}

		/* Ack the interrupt and restart the fifo. */
		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

		if (show && nouveau_ratelimit()) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv10_graph_intr, show);
			printk(" nsource:");
			nouveau_bitfield_print(nv04_graph_nsource, nsource);
			printk(" nstatus:");
			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
				     "mthd 0x%04x data 0x%08x\n",
				chid, subc, class, mthd, data);
		}
	}
}
707
708static void
709nv20_graph_destroy(struct drm_device *dev, int engine)
710{
711 struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
712
713 nouveau_irq_unregister(dev, 12);
714 nouveau_gpuobj_ref(NULL, &pgraph->ctxtab);
715
716 NVOBJ_ENGINE_DEL(dev, GR);
717 kfree(pgraph);
718}
719
/* Create and register the NV2x/NV3x PGRAPH engine: select the
 * chipset-specific context template/size, allocate the channel context
 * pointer table, hook up the interrupt handler and register the object
 * classes the engine accepts.
 * Returns 0 on success (including unknown chipsets, which are merely
 * logged) or a negative errno on allocation failure.
 */
int
nv20_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv20_graph_engine *pgraph;
	int ret;

	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
	if (!pgraph)
		return -ENOMEM;

	pgraph->base.destroy = nv20_graph_destroy;
	pgraph->base.fini = nv20_graph_fini;
	pgraph->base.context_new = nv20_graph_context_new;
	pgraph->base.context_del = nv20_graph_context_del;
	pgraph->base.object_new = nv04_graph_object_new;
	pgraph->base.set_tile_region = nv20_graph_set_tile_region;

	/* Offset of the CTX_USER word in the context image; some chipsets
	 * override it to 0 below. */
	pgraph->grctx_user = 0x0028;
	if (dev_priv->card_type == NV_20) {
		pgraph->base.init = nv20_graph_init;
		switch (dev_priv->chipset) {
		case 0x20:
			pgraph->grctx_init = nv20_graph_context_init;
			pgraph->grctx_size = NV20_GRCTX_SIZE;
			pgraph->grctx_user = 0x0000;
			break;
		case 0x25:
		case 0x28:
			pgraph->grctx_init = nv25_graph_context_init;
			pgraph->grctx_size = NV25_GRCTX_SIZE;
			break;
		case 0x2a:
			pgraph->grctx_init = nv2a_graph_context_init;
			pgraph->grctx_size = NV2A_GRCTX_SIZE;
			pgraph->grctx_user = 0x0000;
			break;
		default:
			NV_ERROR(dev, "PGRAPH: unknown chipset\n");
			kfree(pgraph);
			return 0;
		}
	} else {
		pgraph->base.init = nv30_graph_init;
		switch (dev_priv->chipset) {
		case 0x30:
		case 0x31:
			pgraph->grctx_init = nv30_31_graph_context_init;
			pgraph->grctx_size = NV30_31_GRCTX_SIZE;
			break;
		case 0x34:
			pgraph->grctx_init = nv34_graph_context_init;
			pgraph->grctx_size = NV34_GRCTX_SIZE;
			break;
		case 0x35:
		case 0x36:
			pgraph->grctx_init = nv35_36_graph_context_init;
			pgraph->grctx_size = NV35_36_GRCTX_SIZE;
			break;
		default:
			NV_ERROR(dev, "PGRAPH: unknown chipset\n");
			kfree(pgraph);
			return 0;
		}
	}

	/* Create Context Pointer Table */
	ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC,
				 &pgraph->ctxtab);
	if (ret) {
		kfree(pgraph);
		return ret;
	}

	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	nouveau_irq_register(dev, 12, nv20_graph_isr);

	/* Object classes common to NV2x and NV3x. */
	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
	if (dev_priv->card_type == NV_20) {
		NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
		NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */

		/* kelvin */
		if (dev_priv->chipset < 0x25)
			NVOBJ_CLASS(dev, 0x0097, GR);
		else
			NVOBJ_CLASS(dev, 0x0597, GR);
	} else {
		NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
		NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
		NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
		NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */

		/* rankine: pick the 3D class variant by chipset bitmask. */
		if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
			NVOBJ_CLASS(dev, 0x0397, GR);
		else
		if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
			NVOBJ_CLASS(dev, 0x0697, GR);
		else
		if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
			NVOBJ_CLASS(dev, 0x0497, GR);
	}

	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
deleted file mode 100644
index e0135f0e2144..000000000000
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ /dev/null
@@ -1,116 +0,0 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_drm.h"
31
32void
33nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
34 uint32_t size, uint32_t pitch, uint32_t flags)
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
38
39 tile->addr = addr | 1;
40 tile->limit = max(1u, addr + size) - 1;
41 tile->pitch = pitch;
42}
43
44void
45nv30_fb_free_tile_region(struct drm_device *dev, int i)
46{
47 struct drm_nouveau_private *dev_priv = dev->dev_private;
48 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
49
50 tile->addr = tile->limit = tile->pitch = 0;
51}
52
53static int
54calc_bias(struct drm_device *dev, int k, int i, int j)
55{
56 struct drm_nouveau_private *dev_priv = dev->dev_private;
57 int b = (dev_priv->chipset > 0x30 ?
58 nv_rd32(dev, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
59 0) & 0xf;
60
61 return 2 * (b & 0x8 ? b - 0x10 : b);
62}
63
/* Build a 32-bit timing word from byte 'i' of 'l': each of the four
 * output byte lanes is the bias-adjusted source byte, clamped to
 * [0, 0x1f] and or'ed with the 0x80 valid bit.
 */
static int
calc_ref(struct drm_device *dev, int l, int k, int i)
{
	int x = 0;
	int j;

	for (j = 0; j < 4; j++) {
		int m = ((l >> (8 * i)) & 0xff) + calc_bias(dev, k, i, j);

		m = clamp(m, 0, 0x1f);
		x |= (0x80 | m) << (8 * j);
	}

	return x;
}
77
/* Initialise the NV30-family framebuffer engine: disable all tiling
 * regions and, on chipsets that need it, derive the memory timing
 * registers at 0x10037c/0x1003ac from the reference value in 0x1003d0.
 */
int
nv30_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i, j;

	pfb->num_tiles = NV10_PFB_TILE__SIZE;

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		pfb->set_tile_region(dev, i);

	/* Init the memory timing regs at 0x10037c/0x1003ac */
	if (dev_priv->chipset == 0x30 ||
	    dev_priv->chipset == 0x31 ||
	    dev_priv->chipset == 0x35) {
		/* Related to ROP count */
		int n = (dev_priv->chipset == 0x31 ? 2 : 4);
		int l = nv_rd32(dev, 0x1003d0);

		for (i = 0; i < n; i++) {
			/* Three words per unit from byte 0 of 'l'... */
			for (j = 0; j < 3; j++)
				nv_wr32(dev, 0x10037c + 0xc * i + 0x4 * j,
					calc_ref(dev, l, 0, j));

			/* ...and two words per unit from byte 1. */
			for (j = 0; j < 2; j++)
				nv_wr32(dev, 0x1003ac + 0x8 * i + 0x4 * j,
					calc_ref(dev, l, 1, j));
		}
	}

	return 0;
}
112
/* Nothing to tear down for the NV30 FB engine. */
void
nv30_fb_takedown(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv31_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
deleted file mode 100644
index 5f239bf658c4..000000000000
--- a/drivers/gpu/drm/nouveau/nv31_mpeg.c
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_fifo.h"
28#include "nouveau_ramht.h"
29
/* Per-device PMPEG engine state. */
struct nv31_mpeg_engine {
	struct nouveau_exec_engine base;
	atomic_t refcount; /* contexts in use; nv31 allows at most one */
};
34
35
36static int
37nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
38{
39 struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
40
41 if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
42 return -EBUSY;
43
44 chan->engctx[engine] = (void *)0xdeadcafe;
45 return 0;
46}
47
48static void
49nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
50{
51 struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
52 atomic_dec(&pmpeg->refcount);
53 chan->engctx[engine] = NULL;
54}
55
/* NV40-style PMPEG context creation: allocate a real per-channel context
 * object and hook it into the channel's RAMFC.  The context-switch lock
 * and the 0x002500 disable/enable bracket protect the update against a
 * concurrent fifo context switch.
 */
static int
nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ctx = NULL;
	unsigned long flags;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	nv_wo32(ctx, 0x78, 0x02001ec1);

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
	/* If this channel is currently resident, update the hw pointer too. */
	if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
		nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
	nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	chan->engctx[engine] = ctx;
	return 0;
}
85
/* NV40-style PMPEG context teardown: detach the context from the
 * hardware if it is the currently-bound one, then free it.  The engine
 * is disabled via 0x00b32c around the detach.
 */
static void
nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	unsigned long flags;
	/* Instance pointer as the hardware stores it: valid bit | pinst>>4. */
	u32 inst = 0x80000000 | (ctx->pinst >> 4);

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
	if (nv_rd32(dev, 0x00b318) == inst)
		nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	nouveau_gpuobj_ref(NULL, &ctx);
	chan->engctx[engine] = NULL;
}
105
106static int
107nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
108 u32 handle, u16 class)
109{
110 struct drm_device *dev = chan->dev;
111 struct nouveau_gpuobj *obj = NULL;
112 int ret;
113
114 ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC |
115 NVOBJ_FLAG_ZERO_FREE, &obj);
116 if (ret)
117 return ret;
118 obj->engine = 2;
119 obj->class = class;
120
121 nv_wo32(obj, 0x00, class);
122
123 ret = nouveau_ramht_insert(chan, handle, obj);
124 nouveau_gpuobj_ref(NULL, &obj);
125 return ret;
126}
127
/* Bring up the VPE block and the PMPEG engine within it.
 * Returns 0 on success or -EBUSY if the engine fails to report ready.
 */
static int
nv31_mpeg_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
	int i;

	/* VPE init */
	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
	nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
	nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */

	/* Reprogram all tiling regions into the VPE copy of the tile regs. */
	for (i = 0; i < dev_priv->engine.fb.num_tiles; i++)
		pmpeg->base.set_tile_region(dev, i);

	/* PMPEG init */
	nv_wr32(dev, 0x00b32c, 0x00000000);
	nv_wr32(dev, 0x00b314, 0x00000100);
	nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
	nv_wr32(dev, 0x00b300, 0x02001ec1);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);

	/* Ack stale interrupts and enable them all. */
	nv_wr32(dev, 0x00b100, 0xffffffff);
	nv_wr32(dev, 0x00b140, 0xffffffff);

	if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
		return -EBUSY;
	}

	return 0;
}
161
162static int
163nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
164{
165 /*XXX: context save? */
166 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
167 nv_wr32(dev, 0x00b140, 0x00000000);
168 return 0;
169}
170
/* Software method handler for the PMPEG DMA object binds (methods
 * 0x0190/0x01a0/0x01b0).  Looks up the DMA object at instance 'data',
 * validates it and programs the corresponding base/size registers.
 * Returns 0 on success or -EINVAL for unsupported DMA objects.
 */
static int
nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
	struct drm_device *dev = chan->dev;
	u32 inst = data << 4;
	/* First three words of the DMA object: flags, limit, base. */
	u32 dma0 = nv_ri32(dev, inst + 0);
	u32 dma1 = nv_ri32(dev, inst + 4);
	u32 dma2 = nv_ri32(dev, inst + 8);
	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
	u32 size = dma1 + 1;

	/* only allow linear DMA objects */
	if (!(dma0 & 0x00002000))
		return -EINVAL;

	if (mthd == 0x0190) {
		/* DMA_CMD */
		nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000));
		nv_wr32(dev, 0x00b334, base);
		nv_wr32(dev, 0x00b324, size);
	} else
	if (mthd == 0x01a0) {
		/* DMA_DATA */
		nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
		nv_wr32(dev, 0x00b360, base);
		nv_wr32(dev, 0x00b364, size);
	} else {
		/* DMA_IMAGE, VRAM only */
		if (dma0 & 0x000c0000)
			return -EINVAL;

		nv_wr32(dev, 0x00b370, base);
		nv_wr32(dev, 0x00b374, size);
	}

	return 0;
}
208
209static int
210nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
211{
212 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
213 struct drm_nouveau_private *dev_priv = dev->dev_private;
214 struct nouveau_gpuobj *ctx;
215 unsigned long flags;
216 int i;
217
218 /* hardcode drm channel id on nv3x, so swmthd lookup works */
219 if (dev_priv->card_type < NV_40)
220 return 0;
221
222 spin_lock_irqsave(&dev_priv->channels.lock, flags);
223 for (i = 0; i < pfifo->channels; i++) {
224 if (!dev_priv->channels.ptr[i])
225 continue;
226
227 ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
228 if (ctx && ctx->pinst == inst)
229 break;
230 }
231 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
232 return i;
233}
234
235static void
236nv31_vpe_set_tile_region(struct drm_device *dev, int i)
237{
238 struct drm_nouveau_private *dev_priv = dev->dev_private;
239 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
240
241 nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch);
242 nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit);
243 nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr);
244}
245
/* PMPEG interrupt handler: decode the trapped method, dispatch software
 * methods and log (rate-limited) anything left unhandled.
 */
static void
nv31_mpeg_isr(struct drm_device *dev)
{
	u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
	u32 chid = nv31_mpeg_isr_chid(dev, inst);
	u32 stat = nv_rd32(dev, 0x00b100);
	u32 type = nv_rd32(dev, 0x00b230);
	u32 mthd = nv_rd32(dev, 0x00b234);
	u32 data = nv_rd32(dev, 0x00b238);
	u32 show = stat;

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			nv_mask(dev, 0x00b308, 0x00000000, 0x00000000);
			show &= ~0x01000000;
		}

		/* Software method: hand off to the registered handler. */
		if (type == 0x00000010) {
			if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data))
				show &= ~0x01000000;
		}
	}

	/* Ack the interrupt and kick the engine. */
	nv_wr32(dev, 0x00b100, stat);
	nv_wr32(dev, 0x00b230, 0x00000001);

	if (show && nouveau_ratelimit()) {
		NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
			chid, inst, stat, type, mthd, data);
	}
}
278
279static void
280nv31_vpe_isr(struct drm_device *dev)
281{
282 if (nv_rd32(dev, 0x00b100))
283 nv31_mpeg_isr(dev);
284
285 if (nv_rd32(dev, 0x00b800)) {
286 u32 stat = nv_rd32(dev, 0x00b800);
287 NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
288 nv_wr32(dev, 0xb800, stat);
289 }
290}
291
292static void
293nv31_mpeg_destroy(struct drm_device *dev, int engine)
294{
295 struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
296
297 nouveau_irq_unregister(dev, 0);
298
299 NVOBJ_ENGINE_DEL(dev, MPEG);
300 kfree(pmpeg);
301}
302
/* Create and register the PMPEG engine, choosing NV31-style (single
 * shared context) or NV40-style (per-channel context) handling based on
 * the card generation.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int
nv31_mpeg_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv31_mpeg_engine *pmpeg;

	pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
	if (!pmpeg)
		return -ENOMEM;
	atomic_set(&pmpeg->refcount, 0);

	pmpeg->base.destroy = nv31_mpeg_destroy;
	pmpeg->base.init = nv31_mpeg_init;
	pmpeg->base.fini = nv31_mpeg_fini;
	if (dev_priv->card_type < NV_40) {
		pmpeg->base.context_new = nv31_mpeg_context_new;
		pmpeg->base.context_del = nv31_mpeg_context_del;
	} else {
		pmpeg->base.context_new = nv40_mpeg_context_new;
		pmpeg->base.context_del = nv40_mpeg_context_del;
	}
	pmpeg->base.object_new = nv31_mpeg_object_new;

	/* ISR vector, PMC_ENABLE bit, and TILE regs are shared between
	 * all VPE engines, for this driver's purposes the PMPEG engine
	 * will be treated as the "master" and handle the global VPE
	 * bits too
	 */
	pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
	nouveau_irq_register(dev, 0, nv31_vpe_isr);

	NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
	NVOBJ_CLASS(dev, 0x3174, MPEG);
	/* DMA object bind methods are handled in software. */
	NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
	NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
	NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);

#if 0
	NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
	NVOBJ_CLASS(dev, 0x4075, ME);
#endif
	return 0;

}
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
deleted file mode 100644
index 7fbcb334c096..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ /dev/null
@@ -1,163 +0,0 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6void
7nv40_fb_set_tile_region(struct drm_device *dev, int i)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
11
12 switch (dev_priv->chipset) {
13 case 0x40:
14 nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
15 nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
16 nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
17 break;
18
19 default:
20 nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
21 nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
22 nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
23 break;
24 }
25}
26
27static void
28nv40_fb_init_gart(struct drm_device *dev)
29{
30 struct drm_nouveau_private *dev_priv = dev->dev_private;
31 struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
32
33 if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
34 nv_wr32(dev, 0x100800, 0x00000001);
35 return;
36 }
37
38 nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
39 nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
40 nv_wr32(dev, 0x100820, 0x00000000);
41}
42
43static void
44nv44_fb_init_gart(struct drm_device *dev)
45{
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
48 u32 vinst;
49
50 if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
51 nv_wr32(dev, 0x100850, 0x80000000);
52 nv_wr32(dev, 0x100800, 0x00000001);
53 return;
54 }
55
56 /* calculate vram address of this PRAMIN block, object
57 * must be allocated on 512KiB alignment, and not exceed
58 * a total size of 512KiB for this to work correctly
59 */
60 vinst = nv_rd32(dev, 0x10020c);
61 vinst -= ((gart->pinst >> 19) + 1) << 19;
62
63 nv_wr32(dev, 0x100850, 0x80000000);
64 nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
65
66 nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
67 nv_wr32(dev, 0x100850, 0x00008000);
68 nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
69 nv_wr32(dev, 0x100820, 0x00000000);
70 nv_wr32(dev, 0x10082c, 0x00000001);
71 nv_wr32(dev, 0x100800, vinst | 0x00000010);
72}
73
74int
75nv40_fb_vram_init(struct drm_device *dev)
76{
77 struct drm_nouveau_private *dev_priv = dev->dev_private;
78
79 /* 0x001218 is actually present on a few other NV4X I looked at,
80 * and even contains sane values matching 0x100474. From looking
81 * at various vbios images however, this isn't the case everywhere.
82 * So, I chose to use the same regs I've seen NVIDIA reading around
83 * the memory detection, hopefully that'll get us the right numbers
84 */
85 if (dev_priv->chipset == 0x40) {
86 u32 pbus1218 = nv_rd32(dev, 0x001218);
87 switch (pbus1218 & 0x00000300) {
88 case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
89 case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
90 case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
91 case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
92 }
93 } else
94 if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
95 u32 pfb914 = nv_rd32(dev, 0x100914);
96 switch (pfb914 & 0x00000003) {
97 case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
98 case 0x00000001: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
99 case 0x00000002: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
100 case 0x00000003: break;
101 }
102 } else
103 if (dev_priv->chipset != 0x4e) {
104 u32 pfb474 = nv_rd32(dev, 0x100474);
105 if (pfb474 & 0x00000004)
106 dev_priv->vram_type = NV_MEM_TYPE_GDDR3;
107 if (pfb474 & 0x00000002)
108 dev_priv->vram_type = NV_MEM_TYPE_DDR2;
109 if (pfb474 & 0x00000001)
110 dev_priv->vram_type = NV_MEM_TYPE_DDR1;
111 } else {
112 dev_priv->vram_type = NV_MEM_TYPE_STOLEN;
113 }
114
115 dev_priv->vram_size = nv_rd32(dev, 0x10020c) & 0xff000000;
116 return 0;
117}
118
119int
120nv40_fb_init(struct drm_device *dev)
121{
122 struct drm_nouveau_private *dev_priv = dev->dev_private;
123 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
124 uint32_t tmp;
125 int i;
126
127 if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
128 if (nv44_graph_class(dev))
129 nv44_fb_init_gart(dev);
130 else
131 nv40_fb_init_gart(dev);
132 }
133
134 switch (dev_priv->chipset) {
135 case 0x40:
136 case 0x45:
137 tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
138 nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
139 pfb->num_tiles = NV10_PFB_TILE__SIZE;
140 break;
141 case 0x46: /* G72 */
142 case 0x47: /* G70 */
143 case 0x49: /* G71 */
144 case 0x4b: /* G73 */
145 case 0x4c: /* C51 (G7X version) */
146 pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
147 break;
148 default:
149 pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
150 break;
151 }
152
153 /* Turn all the tiling regions off. */
154 for (i = 0; i < pfb->num_tiles; i++)
155 pfb->set_tile_region(dev, i);
156
157 return 0;
158}
159
160void
161nv40_fb_takedown(struct drm_device *dev)
162{
163}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
deleted file mode 100644
index cdc818479b0a..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ /dev/null
@@ -1,210 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_util.h"
32#include "nouveau_ramht.h"
33
34static struct ramfc_desc {
35 unsigned bits:6;
36 unsigned ctxs:5;
37 unsigned ctxp:8;
38 unsigned regs:5;
39 unsigned regp;
40} nv40_ramfc[] = {
41 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
42 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
43 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
44 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
45 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
46 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
47 { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
48 { 2, 28, 0x18, 28, 0x002058 },
49 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
50 { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
51 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
52 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
53 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
54 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
55 { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
56 { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
57 { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
58 { 32, 0, 0x40, 0, 0x0032e4 },
59 { 32, 0, 0x44, 0, 0x0032e8 },
60 { 32, 0, 0x4c, 0, 0x002088 },
61 { 32, 0, 0x50, 0, 0x003300 },
62 { 32, 0, 0x54, 0, 0x00330c },
63 {}
64};
65
66struct nv40_fifo_priv {
67 struct nouveau_fifo_priv base;
68 struct ramfc_desc *ramfc_desc;
69};
70
71struct nv40_fifo_chan {
72 struct nouveau_fifo_chan base;
73 struct nouveau_gpuobj *ramfc;
74};
75
76static int
77nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
78{
79 struct drm_device *dev = chan->dev;
80 struct drm_nouveau_private *dev_priv = dev->dev_private;
81 struct nv40_fifo_priv *priv = nv_engine(dev, engine);
82 struct nv40_fifo_chan *fctx;
83 unsigned long flags;
84 int ret;
85
86 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
87 if (!fctx)
88 return -ENOMEM;
89
90 /* map channel control registers */
91 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
92 NV03_USER(chan->id), PAGE_SIZE);
93 if (!chan->user) {
94 ret = -ENOMEM;
95 goto error;
96 }
97
98 /* initialise default fifo context */
99 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
100 chan->id * 128, ~0, 128,
101 NVOBJ_FLAG_ZERO_ALLOC |
102 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
103 if (ret)
104 goto error;
105
106 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
107 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
108 nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
109 nv_wo32(fctx->ramfc, 0x18, 0x30000000 |
110 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
111 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
112#ifdef __BIG_ENDIAN
113 NV_PFIFO_CACHE1_BIG_ENDIAN |
114#endif
115 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
116 nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff);
117
118 /* enable dma mode on the channel */
119 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
120 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
121 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
122
123 /*XXX: remove this later, need fifo engine context commit hook */
124 nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc);
125
126error:
127 if (ret)
128 priv->base.base.context_del(chan, engine);
129 return ret;
130}
131
132static int
133nv40_fifo_init(struct drm_device *dev, int engine)
134{
135 struct drm_nouveau_private *dev_priv = dev->dev_private;
136 struct nv40_fifo_priv *priv = nv_engine(dev, engine);
137 int i;
138
139 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
140 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
141
142 nv_wr32(dev, 0x002040, 0x000000ff);
143 nv_wr32(dev, 0x002044, 0x2101ffff);
144 nv_wr32(dev, 0x002058, 0x00000001);
145
146 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
147 ((dev_priv->ramht->bits - 9) << 16) |
148 (dev_priv->ramht->gpuobj->pinst >> 8));
149 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
150
151 switch (dev_priv->chipset) {
152 case 0x47:
153 case 0x49:
154 case 0x4b:
155 nv_wr32(dev, 0x002230, 0x00000001);
156 case 0x40:
157 case 0x41:
158 case 0x42:
159 case 0x43:
160 case 0x45:
161 case 0x48:
162 nv_wr32(dev, 0x002220, 0x00030002);
163 break;
164 default:
165 nv_wr32(dev, 0x002230, 0x00000000);
166 nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 +
167 dev_priv->ramfc->pinst) >> 16) |
168 0x00030000);
169 break;
170 }
171
172 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
173
174 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
175 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
176
177 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
178 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
179 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
180
181 for (i = 0; i < priv->base.channels; i++) {
182 if (dev_priv->channels.ptr[i])
183 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
184 }
185
186 return 0;
187}
188
189int
190nv40_fifo_create(struct drm_device *dev)
191{
192 struct drm_nouveau_private *dev_priv = dev->dev_private;
193 struct nv40_fifo_priv *priv;
194
195 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
196 if (!priv)
197 return -ENOMEM;
198
199 priv->base.base.destroy = nv04_fifo_destroy;
200 priv->base.base.init = nv40_fifo_init;
201 priv->base.base.fini = nv04_fifo_fini;
202 priv->base.base.context_new = nv40_fifo_context_new;
203 priv->base.base.context_del = nv04_fifo_context_del;
204 priv->base.channels = 31;
205 priv->ramfc_desc = nv40_ramfc;
206 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
207
208 nouveau_irq_register(dev, 8, nv04_fifo_isr);
209 return 0;
210}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
deleted file mode 100644
index aa9e2df64a26..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ /dev/null
@@ -1,467 +0,0 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_ramht.h"
32
33struct nv40_graph_engine {
34 struct nouveau_exec_engine base;
35 u32 grctx_size;
36};
37
38static int
39nv40_graph_context_new(struct nouveau_channel *chan, int engine)
40{
41 struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
42 struct drm_device *dev = chan->dev;
43 struct drm_nouveau_private *dev_priv = dev->dev_private;
44 struct nouveau_gpuobj *grctx = NULL;
45 unsigned long flags;
46 int ret;
47
48 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
49 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
50 if (ret)
51 return ret;
52
53 /* Initialise default context values */
54 nv40_grctx_fill(dev, grctx);
55 nv_wo32(grctx, 0, grctx->vinst);
56
57 /* init grctx pointer in ramfc, and on PFIFO if channel is
58 * already active there
59 */
60 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
61 nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
62 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
63 if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
64 nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
65 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
66 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
67
68 chan->engctx[engine] = grctx;
69 return 0;
70}
71
72static void
73nv40_graph_context_del(struct nouveau_channel *chan, int engine)
74{
75 struct nouveau_gpuobj *grctx = chan->engctx[engine];
76 struct drm_device *dev = chan->dev;
77 struct drm_nouveau_private *dev_priv = dev->dev_private;
78 u32 inst = 0x01000000 | (grctx->pinst >> 4);
79 unsigned long flags;
80
81 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
82 nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
83 if (nv_rd32(dev, 0x40032c) == inst)
84 nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
85 if (nv_rd32(dev, 0x400330) == inst)
86 nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
87 nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
88 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
89
90 /* Free the context resources */
91 nouveau_gpuobj_ref(NULL, &grctx);
92 chan->engctx[engine] = NULL;
93}
94
95int
96nv40_graph_object_new(struct nouveau_channel *chan, int engine,
97 u32 handle, u16 class)
98{
99 struct drm_device *dev = chan->dev;
100 struct nouveau_gpuobj *obj = NULL;
101 int ret;
102
103 ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
104 if (ret)
105 return ret;
106 obj->engine = 1;
107 obj->class = class;
108
109 nv_wo32(obj, 0x00, class);
110 nv_wo32(obj, 0x04, 0x00000000);
111#ifndef __BIG_ENDIAN
112 nv_wo32(obj, 0x08, 0x00000000);
113#else
114 nv_wo32(obj, 0x08, 0x01000000);
115#endif
116 nv_wo32(obj, 0x0c, 0x00000000);
117 nv_wo32(obj, 0x10, 0x00000000);
118
119 ret = nouveau_ramht_insert(chan, handle, obj);
120 nouveau_gpuobj_ref(NULL, &obj);
121 return ret;
122}
123
124static void
125nv40_graph_set_tile_region(struct drm_device *dev, int i)
126{
127 struct drm_nouveau_private *dev_priv = dev->dev_private;
128 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
129
130 switch (dev_priv->chipset) {
131 case 0x40:
132 case 0x41: /* guess */
133 case 0x42:
134 case 0x43:
135 case 0x45: /* guess */
136 case 0x4e:
137 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
138 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
139 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
140 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
141 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
142 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
143 break;
144 case 0x44:
145 case 0x4a:
146 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
147 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
148 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
149 break;
150 case 0x46:
151 case 0x47:
152 case 0x49:
153 case 0x4b:
154 case 0x4c:
155 case 0x67:
156 default:
157 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
158 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
159 nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
160 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
161 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
162 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
163 break;
164 }
165}
166
167/*
168 * G70 0x47
169 * G71 0x49
170 * NV45 0x48
171 * G72[M] 0x46
172 * G73 0x4b
173 * C51_G7X 0x4c
174 * C51 0x4e
175 */
176int
177nv40_graph_init(struct drm_device *dev, int engine)
178{
179 struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
180 struct drm_nouveau_private *dev_priv = dev->dev_private;
181 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
182 uint32_t vramsz;
183 int i, j;
184
185 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
186 ~NV_PMC_ENABLE_PGRAPH);
187 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
188 NV_PMC_ENABLE_PGRAPH);
189
190 /* generate and upload context program */
191 nv40_grctx_init(dev, &pgraph->grctx_size);
192
193 /* No context present currently */
194 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
195
196 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
197 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
198
199 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
200 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
201 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
202 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
203 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
204 nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
205
206 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
207 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
208
209 j = nv_rd32(dev, 0x1540) & 0xff;
210 if (j) {
211 for (i = 0; !(j & 1); j >>= 1, i++)
212 ;
213 nv_wr32(dev, 0x405000, i);
214 }
215
216 if (dev_priv->chipset == 0x40) {
217 nv_wr32(dev, 0x4009b0, 0x83280fff);
218 nv_wr32(dev, 0x4009b4, 0x000000a0);
219 } else {
220 nv_wr32(dev, 0x400820, 0x83280eff);
221 nv_wr32(dev, 0x400824, 0x000000a0);
222 }
223
224 switch (dev_priv->chipset) {
225 case 0x40:
226 case 0x45:
227 nv_wr32(dev, 0x4009b8, 0x0078e366);
228 nv_wr32(dev, 0x4009bc, 0x0000014c);
229 break;
230 case 0x41:
231 case 0x42: /* pciid also 0x00Cx */
232 /* case 0x0120: XXX (pciid) */
233 nv_wr32(dev, 0x400828, 0x007596ff);
234 nv_wr32(dev, 0x40082c, 0x00000108);
235 break;
236 case 0x43:
237 nv_wr32(dev, 0x400828, 0x0072cb77);
238 nv_wr32(dev, 0x40082c, 0x00000108);
239 break;
240 case 0x44:
241 case 0x46: /* G72 */
242 case 0x4a:
243 case 0x4c: /* G7x-based C51 */
244 case 0x4e:
245 nv_wr32(dev, 0x400860, 0);
246 nv_wr32(dev, 0x400864, 0);
247 break;
248 case 0x47: /* G70 */
249 case 0x49: /* G71 */
250 case 0x4b: /* G73 */
251 nv_wr32(dev, 0x400828, 0x07830610);
252 nv_wr32(dev, 0x40082c, 0x0000016A);
253 break;
254 default:
255 break;
256 }
257
258 nv_wr32(dev, 0x400b38, 0x2ffff800);
259 nv_wr32(dev, 0x400b3c, 0x00006000);
260
261 /* Tiling related stuff. */
262 switch (dev_priv->chipset) {
263 case 0x44:
264 case 0x4a:
265 nv_wr32(dev, 0x400bc4, 0x1003d888);
266 nv_wr32(dev, 0x400bbc, 0xb7a7b500);
267 break;
268 case 0x46:
269 nv_wr32(dev, 0x400bc4, 0x0000e024);
270 nv_wr32(dev, 0x400bbc, 0xb7a7b520);
271 break;
272 case 0x4c:
273 case 0x4e:
274 case 0x67:
275 nv_wr32(dev, 0x400bc4, 0x1003d888);
276 nv_wr32(dev, 0x400bbc, 0xb7a7b540);
277 break;
278 default:
279 break;
280 }
281
282 /* Turn all the tiling regions off. */
283 for (i = 0; i < pfb->num_tiles; i++)
284 nv40_graph_set_tile_region(dev, i);
285
286 /* begin RAM config */
287 vramsz = pci_resource_len(dev->pdev, 0) - 1;
288 switch (dev_priv->chipset) {
289 case 0x40:
290 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
291 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
292 nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
293 nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
294 nv_wr32(dev, 0x400820, 0);
295 nv_wr32(dev, 0x400824, 0);
296 nv_wr32(dev, 0x400864, vramsz);
297 nv_wr32(dev, 0x400868, vramsz);
298 break;
299 default:
300 switch (dev_priv->chipset) {
301 case 0x41:
302 case 0x42:
303 case 0x43:
304 case 0x45:
305 case 0x4e:
306 case 0x44:
307 case 0x4a:
308 nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
309 nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
310 break;
311 default:
312 nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
313 nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
314 break;
315 }
316 nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
317 nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
318 nv_wr32(dev, 0x400840, 0);
319 nv_wr32(dev, 0x400844, 0);
320 nv_wr32(dev, 0x4008A0, vramsz);
321 nv_wr32(dev, 0x4008A4, vramsz);
322 break;
323 }
324
325 return 0;
326}
327
328static int
329nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
330{
331 u32 inst = nv_rd32(dev, 0x40032c);
332 if (inst & 0x01000000) {
333 nv_wr32(dev, 0x400720, 0x00000000);
334 nv_wr32(dev, 0x400784, inst);
335 nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
336 nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
337 if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
338 u32 insn = nv_rd32(dev, 0x400308);
339 NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
340 }
341 nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
342 }
343 return 0;
344}
345
346static int
347nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
348{
349 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
350 struct drm_nouveau_private *dev_priv = dev->dev_private;
351 struct nouveau_gpuobj *grctx;
352 unsigned long flags;
353 int i;
354
355 spin_lock_irqsave(&dev_priv->channels.lock, flags);
356 for (i = 0; i < pfifo->channels; i++) {
357 if (!dev_priv->channels.ptr[i])
358 continue;
359 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
360
361 if (grctx && grctx->pinst == inst)
362 break;
363 }
364 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
365 return i;
366}
367
368static void
369nv40_graph_isr(struct drm_device *dev)
370{
371 u32 stat;
372
373 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
374 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
375 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
376 u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
377 u32 chid = nv40_graph_isr_chid(dev, inst);
378 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
379 u32 subc = (addr & 0x00070000) >> 16;
380 u32 mthd = (addr & 0x00001ffc);
381 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
382 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
383 u32 show = stat;
384
385 if (stat & NV_PGRAPH_INTR_ERROR) {
386 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
387 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
388 show &= ~NV_PGRAPH_INTR_ERROR;
389 } else
390 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
391 nv_mask(dev, 0x402000, 0, 0);
392 }
393 }
394
395 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
396 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
397
398 if (show && nouveau_ratelimit()) {
399 NV_INFO(dev, "PGRAPH -");
400 nouveau_bitfield_print(nv10_graph_intr, show);
401 printk(" nsource:");
402 nouveau_bitfield_print(nv04_graph_nsource, nsource);
403 printk(" nstatus:");
404 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
405 printk("\n");
406 NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
407 "class 0x%04x mthd 0x%04x data 0x%08x\n",
408 chid, inst, subc, class, mthd, data);
409 }
410 }
411}
412
413static void
414nv40_graph_destroy(struct drm_device *dev, int engine)
415{
416 struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
417
418 nouveau_irq_unregister(dev, 12);
419
420 NVOBJ_ENGINE_DEL(dev, GR);
421 kfree(pgraph);
422}
423
424int
425nv40_graph_create(struct drm_device *dev)
426{
427 struct nv40_graph_engine *pgraph;
428
429 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
430 if (!pgraph)
431 return -ENOMEM;
432
433 pgraph->base.destroy = nv40_graph_destroy;
434 pgraph->base.init = nv40_graph_init;
435 pgraph->base.fini = nv40_graph_fini;
436 pgraph->base.context_new = nv40_graph_context_new;
437 pgraph->base.context_del = nv40_graph_context_del;
438 pgraph->base.object_new = nv40_graph_object_new;
439 pgraph->base.set_tile_region = nv40_graph_set_tile_region;
440
441 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
442 nouveau_irq_register(dev, 12, nv40_graph_isr);
443
444 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
445 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
446 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
447 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
448 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
449 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
450 NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
451 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
452 NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
453 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
454 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
455 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
456 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
457 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
458 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
459
460 /* curie */
461 if (nv44_graph_class(dev))
462 NVOBJ_CLASS(dev, 0x4497, GR);
463 else
464 NVOBJ_CLASS(dev, 0x4097, GR);
465
466 return 0;
467}
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
deleted file mode 100644
index 03c0d4c3f355..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ /dev/null
@@ -1,28 +0,0 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv40_mc_init(struct drm_device *dev)
8{
9 /* Power up everything, resetting each individual unit will
10 * be done later if needed.
11 */
12 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
13
14 if (nv44_graph_class(dev)) {
15 u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
16 nv_wr32(dev, NV40_PMC_1700, tmp);
17 nv_wr32(dev, NV40_PMC_1704, 0);
18 nv_wr32(dev, NV40_PMC_1708, 0);
19 nv_wr32(dev, NV40_PMC_170C, tmp);
20 }
21
22 return 0;
23}
24
25void
26nv40_mc_takedown(struct drm_device *dev)
27{
28}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index e66273aff493..e9b81a97c481 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -23,18 +23,24 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drm.h"
27#include "nouveau_bios.h" 27#include "nouveau_bios.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29#include "nouveau_hw.h" 29#include "nouveau_hw.h"
30#include "nouveau_fifo.h" 30
31#include <subdev/bios/pll.h>
32#include <subdev/clock.h>
33#include <subdev/timer.h>
34
35#include <engine/fifo.h>
31 36
32#define min2(a,b) ((a) < (b) ? (a) : (b)) 37#define min2(a,b) ((a) < (b) ? (a) : (b))
33 38
34static u32 39static u32
35read_pll_1(struct drm_device *dev, u32 reg) 40read_pll_1(struct drm_device *dev, u32 reg)
36{ 41{
37 u32 ctrl = nv_rd32(dev, reg + 0x00); 42 struct nouveau_device *device = nouveau_dev(dev);
43 u32 ctrl = nv_rd32(device, reg + 0x00);
38 int P = (ctrl & 0x00070000) >> 16; 44 int P = (ctrl & 0x00070000) >> 16;
39 int N = (ctrl & 0x0000ff00) >> 8; 45 int N = (ctrl & 0x0000ff00) >> 8;
40 int M = (ctrl & 0x000000ff) >> 0; 46 int M = (ctrl & 0x000000ff) >> 0;
@@ -49,8 +55,9 @@ read_pll_1(struct drm_device *dev, u32 reg)
49static u32 55static u32
50read_pll_2(struct drm_device *dev, u32 reg) 56read_pll_2(struct drm_device *dev, u32 reg)
51{ 57{
52 u32 ctrl = nv_rd32(dev, reg + 0x00); 58 struct nouveau_device *device = nouveau_dev(dev);
53 u32 coef = nv_rd32(dev, reg + 0x04); 59 u32 ctrl = nv_rd32(device, reg + 0x00);
60 u32 coef = nv_rd32(device, reg + 0x04);
54 int N2 = (coef & 0xff000000) >> 24; 61 int N2 = (coef & 0xff000000) >> 24;
55 int M2 = (coef & 0x00ff0000) >> 16; 62 int M2 = (coef & 0x00ff0000) >> 16;
56 int N1 = (coef & 0x0000ff00) >> 8; 63 int N1 = (coef & 0x0000ff00) >> 8;
@@ -89,7 +96,8 @@ read_clk(struct drm_device *dev, u32 src)
89int 96int
90nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) 97nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
91{ 98{
92 u32 ctrl = nv_rd32(dev, 0x00c040); 99 struct nouveau_device *device = nouveau_dev(dev);
100 u32 ctrl = nv_rd32(device, 0x00c040);
93 101
94 perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0); 102 perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0);
95 perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4); 103 perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
@@ -107,27 +115,30 @@ struct nv40_pm_state {
107}; 115};
108 116
109static int 117static int
110nv40_calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll, 118nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
111 u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P) 119 u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
112{ 120{
121 struct nouveau_device *device = nouveau_dev(dev);
122 struct nouveau_bios *bios = nouveau_bios(device);
123 struct nouveau_clock *pclk = nouveau_clock(device);
113 struct nouveau_pll_vals coef; 124 struct nouveau_pll_vals coef;
114 int ret; 125 int ret;
115 126
116 ret = get_pll_limits(dev, reg, pll); 127 ret = nvbios_pll_parse(bios, reg, pll);
117 if (ret) 128 if (ret)
118 return ret; 129 return ret;
119 130
120 if (clk < pll->vco1.maxfreq) 131 if (clk < pll->vco1.max_freq)
121 pll->vco2.maxfreq = 0; 132 pll->vco2.max_freq = 0;
122 133
123 ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef); 134 pclk->pll_calc(pclk, pll, clk, &coef);
124 if (ret == 0) 135 if (ret == 0)
125 return -ERANGE; 136 return -ERANGE;
126 137
127 *N1 = coef.N1; 138 *N1 = coef.N1;
128 *M1 = coef.M1; 139 *M1 = coef.M1;
129 if (N2 && M2) { 140 if (N2 && M2) {
130 if (pll->vco2.maxfreq) { 141 if (pll->vco2.max_freq) {
131 *N2 = coef.N2; 142 *N2 = coef.N2;
132 *M2 = coef.M2; 143 *M2 = coef.M2;
133 } else { 144 } else {
@@ -143,7 +154,7 @@ void *
143nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) 154nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
144{ 155{
145 struct nv40_pm_state *info; 156 struct nv40_pm_state *info;
146 struct pll_lims pll; 157 struct nvbios_pll pll;
147 int N1, N2, M1, M2, log2P; 158 int N1, N2, M1, M2, log2P;
148 int ret; 159 int ret;
149 160
@@ -191,7 +202,7 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
191 goto out; 202 goto out;
192 203
193 info->mpll_ctrl = 0x80000000 | (log2P << 16); 204 info->mpll_ctrl = 0x80000000 | (log2P << 16);
194 info->mpll_ctrl |= min2(pll.log2p_bias + log2P, pll.max_log2p) << 20; 205 info->mpll_ctrl |= min2(pll.bias_p + log2P, pll.max_p) << 20;
195 if (N2 == M2) { 206 if (N2 == M2) {
196 info->mpll_ctrl |= 0x00000100; 207 info->mpll_ctrl |= 0x00000100;
197 info->mpll_coef = (N1 << 8) | M1; 208 info->mpll_coef = (N1 << 8) | M1;
@@ -212,12 +223,13 @@ static bool
212nv40_pm_gr_idle(void *data) 223nv40_pm_gr_idle(void *data)
213{ 224{
214 struct drm_device *dev = data; 225 struct drm_device *dev = data;
226 struct nouveau_device *device = nouveau_dev(dev);
215 227
216 if ((nv_rd32(dev, 0x400760) & 0x000000f0) >> 4 != 228 if ((nv_rd32(device, 0x400760) & 0x000000f0) >> 4 !=
217 (nv_rd32(dev, 0x400760) & 0x0000000f)) 229 (nv_rd32(device, 0x400760) & 0x0000000f))
218 return false; 230 return false;
219 231
220 if (nv_rd32(dev, 0x400700)) 232 if (nv_rd32(device, 0x400700))
221 return false; 233 return false;
222 234
223 return true; 235 return true;
@@ -226,7 +238,9 @@ nv40_pm_gr_idle(void *data)
226int 238int
227nv40_pm_clocks_set(struct drm_device *dev, void *pre_state) 239nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
228{ 240{
229 struct drm_nouveau_private *dev_priv = dev->dev_private; 241 struct nouveau_device *device = nouveau_dev(dev);
242 struct nouveau_fifo *pfifo = nouveau_fifo(device);
243 struct nouveau_drm *drm = nouveau_drm(dev);
230 struct nv40_pm_state *info = pre_state; 244 struct nv40_pm_state *info = pre_state;
231 unsigned long flags; 245 unsigned long flags;
232 struct bit_entry M; 246 struct bit_entry M;
@@ -236,12 +250,12 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
236 250
237 /* determine which CRTCs are active, fetch VGA_SR1 for each */ 251 /* determine which CRTCs are active, fetch VGA_SR1 for each */
238 for (i = 0; i < 2; i++) { 252 for (i = 0; i < 2; i++) {
239 u32 vbl = nv_rd32(dev, 0x600808 + (i * 0x2000)); 253 u32 vbl = nv_rd32(device, 0x600808 + (i * 0x2000));
240 u32 cnt = 0; 254 u32 cnt = 0;
241 do { 255 do {
242 if (vbl != nv_rd32(dev, 0x600808 + (i * 0x2000))) { 256 if (vbl != nv_rd32(device, 0x600808 + (i * 0x2000))) {
243 nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01); 257 nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
244 sr1[i] = nv_rd08(dev, 0x0c03c5 + (i * 0x2000)); 258 sr1[i] = nv_rd08(device, 0x0c03c5 + (i * 0x2000));
245 if (!(sr1[i] & 0x20)) 259 if (!(sr1[i] & 0x20))
246 crtc_mask |= (1 << i); 260 crtc_mask |= (1 << i);
247 break; 261 break;
@@ -251,28 +265,20 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
251 } 265 }
252 266
253 /* halt and idle engines */ 267 /* halt and idle engines */
254 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 268 pfifo->pause(pfifo, &flags);
255 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
256 if (!nv_wait(dev, 0x002500, 0x00000010, 0x00000000))
257 goto resume;
258 nv_mask(dev, 0x003220, 0x00000001, 0x00000000);
259 if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000))
260 goto resume;
261 nv_mask(dev, 0x003200, 0x00000001, 0x00000000);
262 nv04_fifo_cache_pull(dev, false);
263 269
264 if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev)) 270 if (!nv_wait_cb(device, nv40_pm_gr_idle, dev))
265 goto resume; 271 goto resume;
266 272
267 ret = 0; 273 ret = 0;
268 274
269 /* set engine clocks */ 275 /* set engine clocks */
270 nv_mask(dev, 0x00c040, 0x00000333, 0x00000000); 276 nv_mask(device, 0x00c040, 0x00000333, 0x00000000);
271 nv_wr32(dev, 0x004004, info->npll_coef); 277 nv_wr32(device, 0x004004, info->npll_coef);
272 nv_mask(dev, 0x004000, 0xc0070100, info->npll_ctrl); 278 nv_mask(device, 0x004000, 0xc0070100, info->npll_ctrl);
273 nv_mask(dev, 0x004008, 0xc007ffff, info->spll); 279 nv_mask(device, 0x004008, 0xc007ffff, info->spll);
274 mdelay(5); 280 mdelay(5);
275 nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); 281 nv_mask(device, 0x00c040, 0x00000333, info->ctrl);
276 282
277 if (!info->mpll_ctrl) 283 if (!info->mpll_ctrl)
278 goto resume; 284 goto resume;
@@ -281,52 +287,52 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
281 for (i = 0; i < 2; i++) { 287 for (i = 0; i < 2; i++) {
282 if (!(crtc_mask & (1 << i))) 288 if (!(crtc_mask & (1 << i)))
283 continue; 289 continue;
284 nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000); 290 nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
285 nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000); 291 nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
286 nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01); 292 nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
287 nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20); 293 nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
288 } 294 }
289 295
290 /* prepare ram for reclocking */ 296 /* prepare ram for reclocking */
291 nv_wr32(dev, 0x1002d4, 0x00000001); /* precharge */ 297 nv_wr32(device, 0x1002d4, 0x00000001); /* precharge */
292 nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */ 298 nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
293 nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */ 299 nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
294 nv_mask(dev, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */ 300 nv_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
295 nv_wr32(dev, 0x1002dc, 0x00000001); /* enable self-refresh */ 301 nv_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
296 302
297 /* change the PLL of each memory partition */ 303 /* change the PLL of each memory partition */
298 nv_mask(dev, 0x00c040, 0x0000c000, 0x00000000); 304 nv_mask(device, 0x00c040, 0x0000c000, 0x00000000);
299 switch (dev_priv->chipset) { 305 switch (nv_device(drm->device)->chipset) {
300 case 0x40: 306 case 0x40:
301 case 0x45: 307 case 0x45:
302 case 0x41: 308 case 0x41:
303 case 0x42: 309 case 0x42:
304 case 0x47: 310 case 0x47:
305 nv_mask(dev, 0x004044, 0xc0771100, info->mpll_ctrl); 311 nv_mask(device, 0x004044, 0xc0771100, info->mpll_ctrl);
306 nv_mask(dev, 0x00402c, 0xc0771100, info->mpll_ctrl); 312 nv_mask(device, 0x00402c, 0xc0771100, info->mpll_ctrl);
307 nv_wr32(dev, 0x004048, info->mpll_coef); 313 nv_wr32(device, 0x004048, info->mpll_coef);
308 nv_wr32(dev, 0x004030, info->mpll_coef); 314 nv_wr32(device, 0x004030, info->mpll_coef);
309 case 0x43: 315 case 0x43:
310 case 0x49: 316 case 0x49:
311 case 0x4b: 317 case 0x4b:
312 nv_mask(dev, 0x004038, 0xc0771100, info->mpll_ctrl); 318 nv_mask(device, 0x004038, 0xc0771100, info->mpll_ctrl);
313 nv_wr32(dev, 0x00403c, info->mpll_coef); 319 nv_wr32(device, 0x00403c, info->mpll_coef);
314 default: 320 default:
315 nv_mask(dev, 0x004020, 0xc0771100, info->mpll_ctrl); 321 nv_mask(device, 0x004020, 0xc0771100, info->mpll_ctrl);
316 nv_wr32(dev, 0x004024, info->mpll_coef); 322 nv_wr32(device, 0x004024, info->mpll_coef);
317 break; 323 break;
318 } 324 }
319 udelay(100); 325 udelay(100);
320 nv_mask(dev, 0x00c040, 0x0000c000, 0x0000c000); 326 nv_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
321 327
322 /* re-enable normal operation of memory controller */ 328 /* re-enable normal operation of memory controller */
323 nv_wr32(dev, 0x1002dc, 0x00000000); 329 nv_wr32(device, 0x1002dc, 0x00000000);
324 nv_mask(dev, 0x100210, 0x80000000, 0x80000000); 330 nv_mask(device, 0x100210, 0x80000000, 0x80000000);
325 udelay(100); 331 udelay(100);
326 332
327 /* execute memory reset script from vbios */ 333 /* execute memory reset script from vbios */
328 if (!bit_table(dev, 'M', &M)) 334 if (!bit_table(dev, 'M', &M))
329 nouveau_bios_init_exec(dev, ROM16(M.data[0])); 335 nouveau_bios_run_init_table(dev, ROM16(M.data[0]), NULL, 0);
330 336
331 /* make sure we're in vblank (hopefully the same one as before), and 337 /* make sure we're in vblank (hopefully the same one as before), and
332 * then re-enable crtc memory access 338 * then re-enable crtc memory access
@@ -334,62 +340,14 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
334 for (i = 0; i < 2; i++) { 340 for (i = 0; i < 2; i++) {
335 if (!(crtc_mask & (1 << i))) 341 if (!(crtc_mask & (1 << i)))
336 continue; 342 continue;
337 nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000); 343 nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
338 nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01); 344 nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
339 nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i]); 345 nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
340 } 346 }
341 347
342 /* resume engines */ 348 /* resume engines */
343resume: 349resume:
344 nv_wr32(dev, 0x003250, 0x00000001); 350 pfifo->start(pfifo, &flags);
345 nv_mask(dev, 0x003220, 0x00000001, 0x00000001);
346 nv_wr32(dev, 0x003200, 0x00000001);
347 nv_wr32(dev, 0x002500, 0x00000001);
348 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
349
350 kfree(info); 351 kfree(info);
351 return ret; 352 return ret;
352} 353}
353
354int
355nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
356{
357 if (line == 2) {
358 u32 reg = nv_rd32(dev, 0x0010f0);
359 if (reg & 0x80000000) {
360 *duty = (reg & 0x7fff0000) >> 16;
361 *divs = (reg & 0x00007fff);
362 return 0;
363 }
364 } else
365 if (line == 9) {
366 u32 reg = nv_rd32(dev, 0x0015f4);
367 if (reg & 0x80000000) {
368 *divs = nv_rd32(dev, 0x0015f8);
369 *duty = (reg & 0x7fffffff);
370 return 0;
371 }
372 } else {
373 NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
374 return -ENODEV;
375 }
376
377 return -EINVAL;
378}
379
380int
381nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
382{
383 if (line == 2) {
384 nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
385 } else
386 if (line == 9) {
387 nv_wr32(dev, 0x0015f8, divs);
388 nv_wr32(dev, 0x0015f4, duty | 0x80000000);
389 } else {
390 NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
391 return -ENODEV;
392 }
393
394 return 0;
395}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 22cebd5dd694..a771e9067ebf 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -28,24 +28,27 @@
28#include "drm_mode.h" 28#include "drm_mode.h"
29#include "drm_crtc_helper.h" 29#include "drm_crtc_helper.h"
30 30
31#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
32#include "nouveau_reg.h" 31#include "nouveau_reg.h"
33#include "nouveau_drv.h" 32#include "nouveau_drm.h"
33#include "nouveau_dma.h"
34#include "nouveau_gem.h"
34#include "nouveau_hw.h" 35#include "nouveau_hw.h"
35#include "nouveau_encoder.h" 36#include "nouveau_encoder.h"
36#include "nouveau_crtc.h" 37#include "nouveau_crtc.h"
37#include "nouveau_fb.h"
38#include "nouveau_connector.h" 38#include "nouveau_connector.h"
39#include "nv50_display.h" 39#include "nv50_display.h"
40 40
41#include <subdev/clock.h>
42
41static void 43static void
42nv50_crtc_lut_load(struct drm_crtc *crtc) 44nv50_crtc_lut_load(struct drm_crtc *crtc)
43{ 45{
46 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
44 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 47 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
45 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); 48 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
46 int i; 49 int i;
47 50
48 NV_DEBUG_KMS(crtc->dev, "\n"); 51 NV_DEBUG(drm, "\n");
49 52
50 for (i = 0; i < 256; i++) { 53 for (i = 0; i < 256; i++) {
51 writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0); 54 writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
@@ -64,25 +67,25 @@ int
64nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) 67nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
65{ 68{
66 struct drm_device *dev = nv_crtc->base.dev; 69 struct drm_device *dev = nv_crtc->base.dev;
67 struct drm_nouveau_private *dev_priv = dev->dev_private; 70 struct nouveau_drm *drm = nouveau_drm(dev);
68 struct nouveau_channel *evo = nv50_display(dev)->master; 71 struct nouveau_channel *evo = nv50_display(dev)->master;
69 int index = nv_crtc->index, ret; 72 int index = nv_crtc->index, ret;
70 73
71 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 74 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
72 NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked"); 75 NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
73 76
74 if (blanked) { 77 if (blanked) {
75 nv_crtc->cursor.hide(nv_crtc, false); 78 nv_crtc->cursor.hide(nv_crtc, false);
76 79
77 ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5); 80 ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
78 if (ret) { 81 if (ret) {
79 NV_ERROR(dev, "no space while blanking crtc\n"); 82 NV_ERROR(drm, "no space while blanking crtc\n");
80 return ret; 83 return ret;
81 } 84 }
82 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); 85 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
83 OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK); 86 OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
84 OUT_RING(evo, 0); 87 OUT_RING(evo, 0);
85 if (dev_priv->chipset != 0x50) { 88 if (nv_device(drm->device)->chipset != 0x50) {
86 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); 89 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
87 OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE); 90 OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
88 } 91 }
@@ -95,9 +98,9 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
95 else 98 else
96 nv_crtc->cursor.hide(nv_crtc, false); 99 nv_crtc->cursor.hide(nv_crtc, false);
97 100
98 ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8); 101 ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
99 if (ret) { 102 if (ret) {
100 NV_ERROR(dev, "no space while unblanking crtc\n"); 103 NV_ERROR(drm, "no space while unblanking crtc\n");
101 return ret; 104 return ret;
102 } 105 }
103 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2); 106 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
@@ -105,7 +108,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
105 NV50_EVO_CRTC_CLUT_MODE_OFF : 108 NV50_EVO_CRTC_CLUT_MODE_OFF :
106 NV50_EVO_CRTC_CLUT_MODE_ON); 109 NV50_EVO_CRTC_CLUT_MODE_ON);
107 OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8); 110 OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
108 if (dev_priv->chipset != 0x50) { 111 if (nv_device(drm->device)->chipset != 0x50) {
109 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); 112 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
110 OUT_RING(evo, NvEvoVRAM); 113 OUT_RING(evo, NvEvoVRAM);
111 } 114 }
@@ -114,7 +117,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
114 OUT_RING(evo, nv_crtc->fb.offset >> 8); 117 OUT_RING(evo, nv_crtc->fb.offset >> 8);
115 OUT_RING(evo, 0); 118 OUT_RING(evo, 0);
116 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); 119 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
117 if (dev_priv->chipset != 0x50) 120 if (nv_device(drm->device)->chipset != 0x50)
118 if (nv_crtc->fb.tile_flags == 0x7a00 || 121 if (nv_crtc->fb.tile_flags == 0x7a00 ||
119 nv_crtc->fb.tile_flags == 0xfe00) 122 nv_crtc->fb.tile_flags == 0xfe00)
120 OUT_RING(evo, NvEvoFB32); 123 OUT_RING(evo, NvEvoFB32);
@@ -174,17 +177,18 @@ static int
174nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) 177nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
175{ 178{
176 struct drm_device *dev = nv_crtc->base.dev; 179 struct drm_device *dev = nv_crtc->base.dev;
180 struct nouveau_drm *drm = nouveau_drm(dev);
177 struct nouveau_channel *evo = nv50_display(dev)->master; 181 struct nouveau_channel *evo = nv50_display(dev)->master;
178 int ret; 182 int ret;
179 int adj; 183 int adj;
180 u32 hue, vib; 184 u32 hue, vib;
181 185
182 NV_DEBUG_KMS(dev, "vibrance = %i, hue = %i\n", 186 NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
183 nv_crtc->color_vibrance, nv_crtc->vibrant_hue); 187 nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
184 188
185 ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); 189 ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
186 if (ret) { 190 if (ret) {
187 NV_ERROR(dev, "no space while setting color vibrance\n"); 191 NV_ERROR(drm, "no space while setting color vibrance\n");
188 return ret; 192 return ret;
189 } 193 }
190 194
@@ -229,17 +233,18 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
229 struct nouveau_connector *nv_connector; 233 struct nouveau_connector *nv_connector;
230 struct drm_crtc *crtc = &nv_crtc->base; 234 struct drm_crtc *crtc = &nv_crtc->base;
231 struct drm_device *dev = crtc->dev; 235 struct drm_device *dev = crtc->dev;
236 struct nouveau_drm *drm = nouveau_drm(dev);
232 struct nouveau_channel *evo = nv50_display(dev)->master; 237 struct nouveau_channel *evo = nv50_display(dev)->master;
233 struct drm_display_mode *umode = &crtc->mode; 238 struct drm_display_mode *umode = &crtc->mode;
234 struct drm_display_mode *omode; 239 struct drm_display_mode *omode;
235 int scaling_mode, ret; 240 int scaling_mode, ret;
236 u32 ctrl = 0, oX, oY; 241 u32 ctrl = 0, oX, oY;
237 242
238 NV_DEBUG_KMS(dev, "\n"); 243 NV_DEBUG(drm, "\n");
239 244
240 nv_connector = nouveau_crtc_connector_get(nv_crtc); 245 nv_connector = nouveau_crtc_connector_get(nv_crtc);
241 if (!nv_connector || !nv_connector->native_mode) { 246 if (!nv_connector || !nv_connector->native_mode) {
242 NV_ERROR(dev, "no native mode, forcing panel scaling\n"); 247 NV_ERROR(drm, "no native mode, forcing panel scaling\n");
243 scaling_mode = DRM_MODE_SCALE_NONE; 248 scaling_mode = DRM_MODE_SCALE_NONE;
244 } else { 249 } else {
245 scaling_mode = nv_connector->scaling_mode; 250 scaling_mode = nv_connector->scaling_mode;
@@ -329,63 +334,19 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
329int 334int
330nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) 335nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
331{ 336{
332 struct drm_nouveau_private *dev_priv = dev->dev_private; 337 struct nouveau_device *device = nouveau_dev(dev);
333 struct pll_lims pll; 338 struct nouveau_clock *clk = nouveau_clock(device);
334 uint32_t reg1, reg2;
335 int ret, N1, M1, N2, M2, P;
336
337 ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll);
338 if (ret)
339 return ret;
340
341 if (pll.vco2.maxfreq) {
342 ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
343 if (ret <= 0)
344 return 0;
345
346 NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
347 pclk, ret, N1, M1, N2, M2, P);
348
349 reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00;
350 reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00;
351 nv_wr32(dev, pll.reg + 0, 0x10000611);
352 nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1);
353 nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
354 } else
355 if (dev_priv->chipset < NV_C0) {
356 ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
357 if (ret <= 0)
358 return 0;
359
360 NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
361 pclk, ret, N1, N2, M1, P);
362
363 reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000;
364 nv_wr32(dev, pll.reg + 0, 0x50000610);
365 nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
366 nv_wr32(dev, pll.reg + 8, N2);
367 } else {
368 ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
369 if (ret <= 0)
370 return 0;
371
372 NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
373 pclk, ret, N1, N2, M1, P);
374 339
375 nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100); 340 return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
376 nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1);
377 nv_wr32(dev, pll.reg + 0x10, N2 << 16);
378 }
379
380 return 0;
381} 341}
382 342
383static void 343static void
384nv50_crtc_destroy(struct drm_crtc *crtc) 344nv50_crtc_destroy(struct drm_crtc *crtc)
385{ 345{
386 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 346 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
347 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
387 348
388 NV_DEBUG_KMS(crtc->dev, "\n"); 349 NV_DEBUG(drm, "\n");
389 350
390 nouveau_bo_unmap(nv_crtc->lut.nvbo); 351 nouveau_bo_unmap(nv_crtc->lut.nvbo);
391 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); 352 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
@@ -474,13 +435,15 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
474static void 435static void
475nv50_crtc_save(struct drm_crtc *crtc) 436nv50_crtc_save(struct drm_crtc *crtc)
476{ 437{
477 NV_ERROR(crtc->dev, "!!\n"); 438 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
439 NV_ERROR(drm, "!!\n");
478} 440}
479 441
480static void 442static void
481nv50_crtc_restore(struct drm_crtc *crtc) 443nv50_crtc_restore(struct drm_crtc *crtc)
482{ 444{
483 NV_ERROR(crtc->dev, "!!\n"); 445 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
446 NV_ERROR(drm, "!!\n");
484} 447}
485 448
486static const struct drm_crtc_funcs nv50_crtc_funcs = { 449static const struct drm_crtc_funcs nv50_crtc_funcs = {
@@ -504,8 +467,9 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
504{ 467{
505 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 468 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
506 struct drm_device *dev = crtc->dev; 469 struct drm_device *dev = crtc->dev;
470 struct nouveau_drm *drm = nouveau_drm(dev);
507 471
508 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 472 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
509 473
510 nv50_display_flip_stop(crtc); 474 nv50_display_flip_stop(crtc);
511 drm_vblank_pre_modeset(dev, nv_crtc->index); 475 drm_vblank_pre_modeset(dev, nv_crtc->index);
@@ -516,9 +480,10 @@ static void
516nv50_crtc_commit(struct drm_crtc *crtc) 480nv50_crtc_commit(struct drm_crtc *crtc)
517{ 481{
518 struct drm_device *dev = crtc->dev; 482 struct drm_device *dev = crtc->dev;
483 struct nouveau_drm *drm = nouveau_drm(dev);
519 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 484 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
520 485
521 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 486 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
522 487
523 nv50_crtc_blank(nv_crtc, false); 488 nv50_crtc_blank(nv_crtc, false);
524 drm_vblank_post_modeset(dev, nv_crtc->index); 489 drm_vblank_post_modeset(dev, nv_crtc->index);
@@ -540,17 +505,17 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
540{ 505{
541 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 506 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
542 struct drm_device *dev = nv_crtc->base.dev; 507 struct drm_device *dev = nv_crtc->base.dev;
543 struct drm_nouveau_private *dev_priv = dev->dev_private; 508 struct nouveau_drm *drm = nouveau_drm(dev);
544 struct nouveau_channel *evo = nv50_display(dev)->master; 509 struct nouveau_channel *evo = nv50_display(dev)->master;
545 struct drm_framebuffer *drm_fb; 510 struct drm_framebuffer *drm_fb;
546 struct nouveau_framebuffer *fb; 511 struct nouveau_framebuffer *fb;
547 int ret; 512 int ret;
548 513
549 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 514 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
550 515
551 /* no fb bound */ 516 /* no fb bound */
552 if (!atomic && !crtc->fb) { 517 if (!atomic && !crtc->fb) {
553 NV_DEBUG_KMS(dev, "No FB bound\n"); 518 NV_DEBUG(drm, "No FB bound\n");
554 return 0; 519 return 0;
555 } 520 }
556 521
@@ -580,7 +545,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
580 nv_crtc->fb.offset = fb->nvbo->bo.offset; 545 nv_crtc->fb.offset = fb->nvbo->bo.offset;
581 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); 546 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
582 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; 547 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
583 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { 548 if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
584 ret = RING_SPACE(evo, 2); 549 ret = RING_SPACE(evo, 2);
585 if (ret) 550 if (ret)
586 return ret; 551 return ret;
@@ -738,10 +703,11 @@ static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
738int 703int
739nv50_crtc_create(struct drm_device *dev, int index) 704nv50_crtc_create(struct drm_device *dev, int index)
740{ 705{
706 struct nouveau_drm *drm = nouveau_drm(dev);
741 struct nouveau_crtc *nv_crtc = NULL; 707 struct nouveau_crtc *nv_crtc = NULL;
742 int ret, i; 708 int ret, i;
743 709
744 NV_DEBUG_KMS(dev, "\n"); 710 NV_DEBUG(drm, "\n");
745 711
746 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); 712 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
747 if (!nv_crtc) 713 if (!nv_crtc)
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index af4ec7bf3670..ba047e9251b3 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -27,9 +27,8 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm_mode.h" 28#include "drm_mode.h"
29 29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) 30#include "nouveau_drm.h"
31#include "nouveau_reg.h" 31#include "nouveau_dma.h"
32#include "nouveau_drv.h"
33#include "nouveau_crtc.h" 32#include "nouveau_crtc.h"
34#include "nv50_display.h" 33#include "nv50_display.h"
35 34
@@ -37,22 +36,22 @@ static void
37nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update) 36nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
38{ 37{
39 struct drm_device *dev = nv_crtc->base.dev; 38 struct drm_device *dev = nv_crtc->base.dev;
40 struct drm_nouveau_private *dev_priv = dev->dev_private; 39 struct nouveau_drm *drm = nouveau_drm(dev);
41 struct nouveau_channel *evo = nv50_display(dev)->master; 40 struct nouveau_channel *evo = nv50_display(dev)->master;
42 int ret; 41 int ret;
43 42
44 NV_DEBUG_KMS(dev, "\n"); 43 NV_DEBUG(drm, "\n");
45 44
46 if (update && nv_crtc->cursor.visible) 45 if (update && nv_crtc->cursor.visible)
47 return; 46 return;
48 47
49 ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2); 48 ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
50 if (ret) { 49 if (ret) {
51 NV_ERROR(dev, "no space while unhiding cursor\n"); 50 NV_ERROR(drm, "no space while unhiding cursor\n");
52 return; 51 return;
53 } 52 }
54 53
55 if (dev_priv->chipset != 0x50) { 54 if (nv_device(drm->device)->chipset != 0x50) {
56 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1); 55 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
57 OUT_RING(evo, NvEvoVRAM); 56 OUT_RING(evo, NvEvoVRAM);
58 } 57 }
@@ -72,24 +71,24 @@ static void
72nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) 71nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
73{ 72{
74 struct drm_device *dev = nv_crtc->base.dev; 73 struct drm_device *dev = nv_crtc->base.dev;
75 struct drm_nouveau_private *dev_priv = dev->dev_private; 74 struct nouveau_drm *drm = nouveau_drm(dev);
76 struct nouveau_channel *evo = nv50_display(dev)->master; 75 struct nouveau_channel *evo = nv50_display(dev)->master;
77 int ret; 76 int ret;
78 77
79 NV_DEBUG_KMS(dev, "\n"); 78 NV_DEBUG(drm, "\n");
80 79
81 if (update && !nv_crtc->cursor.visible) 80 if (update && !nv_crtc->cursor.visible)
82 return; 81 return;
83 82
84 ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2); 83 ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
85 if (ret) { 84 if (ret) {
86 NV_ERROR(dev, "no space while hiding cursor\n"); 85 NV_ERROR(drm, "no space while hiding cursor\n");
87 return; 86 return;
88 } 87 }
89 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2); 88 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
90 OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE); 89 OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
91 OUT_RING(evo, 0); 90 OUT_RING(evo, 0);
92 if (dev_priv->chipset != 0x50) { 91 if (nv_device(drm->device)->chipset != 0x50) {
93 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1); 92 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
94 OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE); 93 OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
95 } 94 }
@@ -105,19 +104,18 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
105static void 104static void
106nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) 105nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
107{ 106{
108 struct drm_device *dev = nv_crtc->base.dev; 107 struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
109 108
110 nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; 109 nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
111 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), 110 nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
112 ((y & 0xFFFF) << 16) | (x & 0xFFFF)); 111 ((y & 0xFFFF) << 16) | (x & 0xFFFF));
113 /* Needed to make the cursor move. */ 112 /* Needed to make the cursor move. */
114 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0); 113 nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
115} 114}
116 115
117static void 116static void
118nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) 117nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
119{ 118{
120 NV_DEBUG_KMS(nv_crtc->base.dev, "\n");
121 if (offset == nv_crtc->cursor.offset) 119 if (offset == nv_crtc->cursor.offset)
122 return; 120 return;
123 121
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 2c36a6b92c53..4a01b49d5ece 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -29,18 +29,21 @@
29 29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) 30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h" 31#include "nouveau_reg.h"
32#include "nouveau_drv.h" 32#include "nouveau_drm.h"
33#include "nouveau_dma.h" 33#include "nouveau_dma.h"
34#include "nouveau_encoder.h" 34#include "nouveau_encoder.h"
35#include "nouveau_connector.h" 35#include "nouveau_connector.h"
36#include "nouveau_crtc.h" 36#include "nouveau_crtc.h"
37#include "nv50_display.h" 37#include "nv50_display.h"
38 38
39#include <subdev/timer.h>
40
39static void 41static void
40nv50_dac_disconnect(struct drm_encoder *encoder) 42nv50_dac_disconnect(struct drm_encoder *encoder)
41{ 43{
42 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 44 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
43 struct drm_device *dev = encoder->dev; 45 struct drm_device *dev = encoder->dev;
46 struct nouveau_drm *drm = nouveau_drm(dev);
44 struct nouveau_channel *evo = nv50_display(dev)->master; 47 struct nouveau_channel *evo = nv50_display(dev)->master;
45 int ret; 48 int ret;
46 49
@@ -48,11 +51,11 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
48 return; 51 return;
49 nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); 52 nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
50 53
51 NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or); 54 NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
52 55
53 ret = RING_SPACE(evo, 4); 56 ret = RING_SPACE(evo, 4);
54 if (ret) { 57 if (ret) {
55 NV_ERROR(dev, "no space while disconnecting DAC\n"); 58 NV_ERROR(drm, "no space while disconnecting DAC\n");
56 return; 59 return;
57 } 60 }
58 BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1); 61 BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
@@ -67,43 +70,43 @@ static enum drm_connector_status
67nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) 70nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
68{ 71{
69 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 72 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
70 struct drm_device *dev = encoder->dev; 73 struct nouveau_device *device = nouveau_dev(encoder->dev);
71 struct drm_nouveau_private *dev_priv = dev->dev_private; 74 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
72 enum drm_connector_status status = connector_status_disconnected; 75 enum drm_connector_status status = connector_status_disconnected;
73 uint32_t dpms_state, load_pattern, load_state; 76 uint32_t dpms_state, load_pattern, load_state;
74 int or = nv_encoder->or; 77 int or = nv_encoder->or;
75 78
76 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001); 79 nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
77 dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)); 80 dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
78 81
79 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), 82 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
80 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); 83 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
81 if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), 84 if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
82 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { 85 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
83 NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); 86 NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
84 NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, 87 NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
85 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); 88 nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
86 return status; 89 return status;
87 } 90 }
88 91
89 /* Use bios provided value if possible. */ 92 /* Use bios provided value if possible. */
90 if (dev_priv->vbios.dactestval) { 93 if (drm->vbios.dactestval) {
91 load_pattern = dev_priv->vbios.dactestval; 94 load_pattern = drm->vbios.dactestval;
92 NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n", 95 NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
93 load_pattern); 96 load_pattern);
94 } else { 97 } else {
95 load_pattern = 340; 98 load_pattern = 340;
96 NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n", 99 NV_DEBUG(drm, "Using default load_pattern of %d\n",
97 load_pattern); 100 load_pattern);
98 } 101 }
99 102
100 nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 103 nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
101 NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern); 104 NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
102 mdelay(45); /* give it some time to process */ 105 mdelay(45); /* give it some time to process */
103 load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or)); 106 load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
104 107
105 nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0); 108 nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
106 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state | 109 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
107 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); 110 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
108 111
109 if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) == 112 if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
@@ -111,9 +114,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
111 status = connector_status_connected; 114 status = connector_status_connected;
112 115
113 if (status == connector_status_connected) 116 if (status == connector_status_connected)
114 NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or); 117 NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
115 else 118 else
116 NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or); 119 NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
117 120
118 return status; 121 return status;
119} 122}
@@ -121,23 +124,24 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
121static void 124static void
122nv50_dac_dpms(struct drm_encoder *encoder, int mode) 125nv50_dac_dpms(struct drm_encoder *encoder, int mode)
123{ 126{
124 struct drm_device *dev = encoder->dev; 127 struct nouveau_device *device = nouveau_dev(encoder->dev);
128 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
125 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 129 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
126 uint32_t val; 130 uint32_t val;
127 int or = nv_encoder->or; 131 int or = nv_encoder->or;
128 132
129 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); 133 NV_DEBUG(drm, "or %d mode %d\n", or, mode);
130 134
131 /* wait for it to be done */ 135 /* wait for it to be done */
132 if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), 136 if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
133 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { 137 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
134 NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); 138 NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
135 NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, 139 NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
136 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); 140 nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
137 return; 141 return;
138 } 142 }
139 143
140 val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F; 144 val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
141 145
142 if (mode != DRM_MODE_DPMS_ON) 146 if (mode != DRM_MODE_DPMS_ON)
143 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED; 147 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
@@ -158,20 +162,22 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
158 break; 162 break;
159 } 163 }
160 164
161 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val | 165 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
162 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); 166 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
163} 167}
164 168
165static void 169static void
166nv50_dac_save(struct drm_encoder *encoder) 170nv50_dac_save(struct drm_encoder *encoder)
167{ 171{
168 NV_ERROR(encoder->dev, "!!\n"); 172 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
173 NV_ERROR(drm, "!!\n");
169} 174}
170 175
171static void 176static void
172nv50_dac_restore(struct drm_encoder *encoder) 177nv50_dac_restore(struct drm_encoder *encoder)
173{ 178{
174 NV_ERROR(encoder->dev, "!!\n"); 179 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
180 NV_ERROR(drm, "!!\n");
175} 181}
176 182
177static bool 183static bool
@@ -179,14 +185,15 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder,
179 const struct drm_display_mode *mode, 185 const struct drm_display_mode *mode,
180 struct drm_display_mode *adjusted_mode) 186 struct drm_display_mode *adjusted_mode)
181{ 187{
188 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
182 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 189 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
183 struct nouveau_connector *connector; 190 struct nouveau_connector *connector;
184 191
185 NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or); 192 NV_DEBUG(drm, "or %d\n", nv_encoder->or);
186 193
187 connector = nouveau_encoder_connector_get(nv_encoder); 194 connector = nouveau_encoder_connector_get(nv_encoder);
188 if (!connector) { 195 if (!connector) {
189 NV_ERROR(encoder->dev, "Encoder has no connector\n"); 196 NV_ERROR(drm, "Encoder has no connector\n");
190 return false; 197 return false;
191 } 198 }
192 199
@@ -207,13 +214,14 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
207 struct drm_display_mode *adjusted_mode) 214 struct drm_display_mode *adjusted_mode)
208{ 215{
209 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 216 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
217 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
210 struct drm_device *dev = encoder->dev; 218 struct drm_device *dev = encoder->dev;
211 struct nouveau_channel *evo = nv50_display(dev)->master; 219 struct nouveau_channel *evo = nv50_display(dev)->master;
212 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); 220 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
213 uint32_t mode_ctl = 0, mode_ctl2 = 0; 221 uint32_t mode_ctl = 0, mode_ctl2 = 0;
214 int ret; 222 int ret;
215 223
216 NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n", 224 NV_DEBUG(drm, "or %d type %d crtc %d\n",
217 nv_encoder->or, nv_encoder->dcb->type, crtc->index); 225 nv_encoder->or, nv_encoder->dcb->type, crtc->index);
218 226
219 nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); 227 nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -224,10 +232,10 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
224 mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0; 232 mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
225 233
226 /* Lacking a working tv-out, this is not a 100% sure. */ 234 /* Lacking a working tv-out, this is not a 100% sure. */
227 if (nv_encoder->dcb->type == OUTPUT_ANALOG) 235 if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
228 mode_ctl |= 0x40; 236 mode_ctl |= 0x40;
229 else 237 else
230 if (nv_encoder->dcb->type == OUTPUT_TV) 238 if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
231 mode_ctl |= 0x100; 239 mode_ctl |= 0x100;
232 240
233 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 241 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -238,7 +246,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
238 246
239 ret = RING_SPACE(evo, 3); 247 ret = RING_SPACE(evo, 3);
240 if (ret) { 248 if (ret) {
241 NV_ERROR(dev, "no space while connecting DAC\n"); 249 NV_ERROR(drm, "no space while connecting DAC\n");
242 return; 250 return;
243 } 251 }
244 BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2); 252 BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
@@ -271,11 +279,12 @@ static void
271nv50_dac_destroy(struct drm_encoder *encoder) 279nv50_dac_destroy(struct drm_encoder *encoder)
272{ 280{
273 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 281 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
282 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
274 283
275 if (!encoder) 284 if (!encoder)
276 return; 285 return;
277 286
278 NV_DEBUG_KMS(encoder->dev, "\n"); 287 NV_DEBUG(drm, "\n");
279 288
280 drm_encoder_cleanup(encoder); 289 drm_encoder_cleanup(encoder);
281 kfree(nv_encoder); 290 kfree(nv_encoder);
@@ -286,7 +295,7 @@ static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
286}; 295};
287 296
288int 297int
289nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry) 298nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
290{ 299{
291 struct nouveau_encoder *nv_encoder; 300 struct nouveau_encoder *nv_encoder;
292 struct drm_encoder *encoder; 301 struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index b244d9968c5d..787ddc9f314c 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -24,28 +24,30 @@
24 * 24 *
25 */ 25 */
26 26
27#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) 27#include "nouveau_drm.h"
28#include "nouveau_dma.h"
29
28#include "nv50_display.h" 30#include "nv50_display.h"
29#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
30#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
31#include "nouveau_connector.h" 33#include "nouveau_connector.h"
32#include "nouveau_fb.h"
33#include "nouveau_fbcon.h" 34#include "nouveau_fbcon.h"
34#include "nouveau_ramht.h"
35#include "nouveau_software.h"
36#include "drm_crtc_helper.h" 35#include "drm_crtc_helper.h"
36#include "nouveau_fence.h"
37
38#include <core/gpuobj.h>
39#include <subdev/timer.h>
37 40
38static void nv50_display_isr(struct drm_device *);
39static void nv50_display_bh(unsigned long); 41static void nv50_display_bh(unsigned long);
40 42
41static inline int 43static inline int
42nv50_sor_nr(struct drm_device *dev) 44nv50_sor_nr(struct drm_device *dev)
43{ 45{
44 struct drm_nouveau_private *dev_priv = dev->dev_private; 46 struct nouveau_device *device = nouveau_dev(dev);
45 47
46 if (dev_priv->chipset < 0x90 || 48 if (device->chipset < 0x90 ||
47 dev_priv->chipset == 0x92 || 49 device->chipset == 0x92 ||
48 dev_priv->chipset == 0xa0) 50 device->chipset == 0xa0)
49 return 2; 51 return 2;
50 52
51 return 4; 53 return 4;
@@ -54,73 +56,29 @@ nv50_sor_nr(struct drm_device *dev)
54u32 56u32
55nv50_display_active_crtcs(struct drm_device *dev) 57nv50_display_active_crtcs(struct drm_device *dev)
56{ 58{
57 struct drm_nouveau_private *dev_priv = dev->dev_private; 59 struct nouveau_device *device = nouveau_dev(dev);
58 u32 mask = 0; 60 u32 mask = 0;
59 int i; 61 int i;
60 62
61 if (dev_priv->chipset < 0x90 || 63 if (device->chipset < 0x90 ||
62 dev_priv->chipset == 0x92 || 64 device->chipset == 0x92 ||
63 dev_priv->chipset == 0xa0) { 65 device->chipset == 0xa0) {
64 for (i = 0; i < 2; i++) 66 for (i = 0; i < 2; i++)
65 mask |= nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); 67 mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
66 } else { 68 } else {
67 for (i = 0; i < 4; i++) 69 for (i = 0; i < 4; i++)
68 mask |= nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); 70 mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
69 } 71 }
70 72
71 for (i = 0; i < 3; i++) 73 for (i = 0; i < 3; i++)
72 mask |= nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); 74 mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
73 75
74 return mask & 3; 76 return mask & 3;
75} 77}
76 78
77static int
78evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
79{
80 int ret = 0;
81 nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
82 nv_wr32(dev, 0x610304 + (ch * 0x08), data);
83 nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
84 if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
85 ret = -EBUSY;
86 if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
87 NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
88 nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
89 return ret;
90}
91
92int 79int
93nv50_display_early_init(struct drm_device *dev) 80nv50_display_early_init(struct drm_device *dev)
94{ 81{
95 u32 ctrl = nv_rd32(dev, 0x610200);
96 int i;
97
98 /* check if master evo channel is already active, a good a sign as any
99 * that the display engine is in a weird state (hibernate/kexec), if
100 * it is, do our best to reset the display engine...
101 */
102 if ((ctrl & 0x00000003) == 0x00000003) {
103 NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
104
105 /* deactivate both heads first, PDISP will disappear forever
106 * (well, until you power cycle) on some boards as soon as
107 * PMC_ENABLE is hit unless they are..
108 */
109 for (i = 0; i < 2; i++) {
110 evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
111 evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
112 evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
113 evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
114 evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
115 evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
116 }
117 evo_icmd(dev, 0, 0x0080, 0);
118
119 /* reset PDISP */
120 nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
121 nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
122 }
123
124 return 0; 82 return 0;
125} 83}
126 84
@@ -132,11 +90,8 @@ nv50_display_late_takedown(struct drm_device *dev)
132int 90int
133nv50_display_sync(struct drm_device *dev) 91nv50_display_sync(struct drm_device *dev)
134{ 92{
135 struct drm_nouveau_private *dev_priv = dev->dev_private;
136 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
137 struct nv50_display *disp = nv50_display(dev); 93 struct nv50_display *disp = nv50_display(dev);
138 struct nouveau_channel *evo = disp->master; 94 struct nouveau_channel *evo = disp->master;
139 u64 start;
140 int ret; 95 int ret;
141 96
142 ret = RING_SPACE(evo, 6); 97 ret = RING_SPACE(evo, 6);
@@ -148,29 +103,28 @@ nv50_display_sync(struct drm_device *dev)
148 BEGIN_NV04(evo, 0, 0x0084, 1); 103 BEGIN_NV04(evo, 0, 0x0084, 1);
149 OUT_RING (evo, 0x00000000); 104 OUT_RING (evo, 0x00000000);
150 105
151 nv_wo32(disp->ntfy, 0x000, 0x00000000); 106 nv_wo32(disp->ramin, 0x2000, 0x00000000);
152 FIRE_RING (evo); 107 FIRE_RING (evo);
153 108
154 start = ptimer->read(dev); 109 if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000))
155 do { 110 return 0;
156 if (nv_ro32(disp->ntfy, 0x000))
157 return 0;
158 } while (ptimer->read(dev) - start < 2000000000ULL);
159 } 111 }
160 112
161 return -EBUSY; 113 return 0;
162} 114}
163 115
164int 116int
165nv50_display_init(struct drm_device *dev) 117nv50_display_init(struct drm_device *dev)
166{ 118{
119 struct nouveau_drm *drm = nouveau_drm(dev);
120 struct nouveau_device *device = nouveau_dev(dev);
167 struct nouveau_channel *evo; 121 struct nouveau_channel *evo;
168 int ret, i; 122 int ret, i;
169 u32 val; 123 u32 val;
170 124
171 NV_DEBUG_KMS(dev, "\n"); 125 NV_DEBUG(drm, "\n");
172 126
173 nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); 127 nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004));
174 128
175 /* 129 /*
176 * I think the 0x006101XX range is some kind of main control area 130 * I think the 0x006101XX range is some kind of main control area
@@ -178,82 +132,82 @@ nv50_display_init(struct drm_device *dev)
178 */ 132 */
179 /* CRTC? */ 133 /* CRTC? */
180 for (i = 0; i < 2; i++) { 134 for (i = 0; i < 2; i++) {
181 val = nv_rd32(dev, 0x00616100 + (i * 0x800)); 135 val = nv_rd32(device, 0x00616100 + (i * 0x800));
182 nv_wr32(dev, 0x00610190 + (i * 0x10), val); 136 nv_wr32(device, 0x00610190 + (i * 0x10), val);
183 val = nv_rd32(dev, 0x00616104 + (i * 0x800)); 137 val = nv_rd32(device, 0x00616104 + (i * 0x800));
184 nv_wr32(dev, 0x00610194 + (i * 0x10), val); 138 nv_wr32(device, 0x00610194 + (i * 0x10), val);
185 val = nv_rd32(dev, 0x00616108 + (i * 0x800)); 139 val = nv_rd32(device, 0x00616108 + (i * 0x800));
186 nv_wr32(dev, 0x00610198 + (i * 0x10), val); 140 nv_wr32(device, 0x00610198 + (i * 0x10), val);
187 val = nv_rd32(dev, 0x0061610c + (i * 0x800)); 141 val = nv_rd32(device, 0x0061610c + (i * 0x800));
188 nv_wr32(dev, 0x0061019c + (i * 0x10), val); 142 nv_wr32(device, 0x0061019c + (i * 0x10), val);
189 } 143 }
190 144
191 /* DAC */ 145 /* DAC */
192 for (i = 0; i < 3; i++) { 146 for (i = 0; i < 3; i++) {
193 val = nv_rd32(dev, 0x0061a000 + (i * 0x800)); 147 val = nv_rd32(device, 0x0061a000 + (i * 0x800));
194 nv_wr32(dev, 0x006101d0 + (i * 0x04), val); 148 nv_wr32(device, 0x006101d0 + (i * 0x04), val);
195 } 149 }
196 150
197 /* SOR */ 151 /* SOR */
198 for (i = 0; i < nv50_sor_nr(dev); i++) { 152 for (i = 0; i < nv50_sor_nr(dev); i++) {
199 val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); 153 val = nv_rd32(device, 0x0061c000 + (i * 0x800));
200 nv_wr32(dev, 0x006101e0 + (i * 0x04), val); 154 nv_wr32(device, 0x006101e0 + (i * 0x04), val);
201 } 155 }
202 156
203 /* EXT */ 157 /* EXT */
204 for (i = 0; i < 3; i++) { 158 for (i = 0; i < 3; i++) {
205 val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); 159 val = nv_rd32(device, 0x0061e000 + (i * 0x800));
206 nv_wr32(dev, 0x006101f0 + (i * 0x04), val); 160 nv_wr32(device, 0x006101f0 + (i * 0x04), val);
207 } 161 }
208 162
209 for (i = 0; i < 3; i++) { 163 for (i = 0; i < 3; i++) {
210 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 | 164 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
211 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); 165 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
212 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); 166 nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
213 } 167 }
214 168
215 /* The precise purpose is unknown, i suspect it has something to do 169 /* The precise purpose is unknown, i suspect it has something to do
216 * with text mode. 170 * with text mode.
217 */ 171 */
218 if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) { 172 if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
219 nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100); 173 nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
220 nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1); 174 nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
221 if (!nv_wait(dev, 0x006194e8, 2, 0)) { 175 if (!nv_wait(device, 0x006194e8, 2, 0)) {
222 NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n"); 176 NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
223 NV_ERROR(dev, "0x6194e8 = 0x%08x\n", 177 NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
224 nv_rd32(dev, 0x6194e8)); 178 nv_rd32(device, 0x6194e8));
225 return -EBUSY; 179 return -EBUSY;
226 } 180 }
227 } 181 }
228 182
229 for (i = 0; i < 2; i++) { 183 for (i = 0; i < 2; i++) {
230 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); 184 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
231 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 185 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
232 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { 186 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
233 NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); 187 NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
234 NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", 188 NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
235 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); 189 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
236 return -EBUSY; 190 return -EBUSY;
237 } 191 }
238 192
239 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 193 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
240 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON); 194 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
241 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 195 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
242 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 196 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
243 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) { 197 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
244 NV_ERROR(dev, "timeout: " 198 NV_ERROR(drm, "timeout: "
245 "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i); 199 "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
246 NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i, 200 NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
247 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); 201 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
248 return -EBUSY; 202 return -EBUSY;
249 } 203 }
250 } 204 }
251 205
252 nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000); 206 nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
253 nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000); 207 nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
254 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000); 208 nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
255 nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000); 209 nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
256 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 210 nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
257 NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 | 211 NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
258 NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 | 212 NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
259 NV50_PDISPLAY_INTR_EN_1_CLK_UNK40); 213 NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
@@ -263,7 +217,7 @@ nv50_display_init(struct drm_device *dev)
263 return ret; 217 return ret;
264 evo = nv50_display(dev)->master; 218 evo = nv50_display(dev)->master;
265 219
266 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); 220 nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
267 221
268 ret = RING_SPACE(evo, 3); 222 ret = RING_SPACE(evo, 3);
269 if (ret) 223 if (ret)
@@ -278,12 +232,14 @@ nv50_display_init(struct drm_device *dev)
278void 232void
279nv50_display_fini(struct drm_device *dev) 233nv50_display_fini(struct drm_device *dev)
280{ 234{
235 struct nouveau_drm *drm = nouveau_drm(dev);
236 struct nouveau_device *device = nouveau_dev(dev);
281 struct nv50_display *disp = nv50_display(dev); 237 struct nv50_display *disp = nv50_display(dev);
282 struct nouveau_channel *evo = disp->master; 238 struct nouveau_channel *evo = disp->master;
283 struct drm_crtc *drm_crtc; 239 struct drm_crtc *drm_crtc;
284 int ret, i; 240 int ret, i;
285 241
286 NV_DEBUG_KMS(dev, "\n"); 242 NV_DEBUG(drm, "\n");
287 243
288 list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { 244 list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
289 struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); 245 struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
@@ -308,55 +264,59 @@ nv50_display_fini(struct drm_device *dev)
308 if (!crtc->base.enabled) 264 if (!crtc->base.enabled)
309 continue; 265 continue;
310 266
311 nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask); 267 nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
312 if (!nv_wait(dev, NV50_PDISPLAY_INTR_1, mask, mask)) { 268 if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
313 NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == " 269 NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
314 "0x%08x\n", mask, mask); 270 "0x%08x\n", mask, mask);
315 NV_ERROR(dev, "0x610024 = 0x%08x\n", 271 NV_ERROR(drm, "0x610024 = 0x%08x\n",
316 nv_rd32(dev, NV50_PDISPLAY_INTR_1)); 272 nv_rd32(device, NV50_PDISPLAY_INTR_1));
317 } 273 }
318 } 274 }
319 275
320 for (i = 0; i < 2; i++) { 276 for (i = 0; i < 2; i++) {
321 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0); 277 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
322 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 278 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
323 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { 279 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
324 NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); 280 NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
325 NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", 281 NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
326 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); 282 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
327 } 283 }
328 } 284 }
329 285
330 nv50_evo_fini(dev); 286 nv50_evo_fini(dev);
331 287
332 for (i = 0; i < 3; i++) { 288 for (i = 0; i < 3; i++) {
333 if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), 289 if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
334 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { 290 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
335 NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i); 291 NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
336 NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i, 292 NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
337 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i))); 293 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
338 } 294 }
339 } 295 }
340 296
341 /* disable interrupts. */ 297 /* disable interrupts. */
342 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000); 298 nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
343} 299}
344 300
345int 301int
346nv50_display_create(struct drm_device *dev) 302nv50_display_create(struct drm_device *dev)
347{ 303{
348 struct drm_nouveau_private *dev_priv = dev->dev_private; 304 struct nouveau_drm *drm = nouveau_drm(dev);
349 struct dcb_table *dcb = &dev_priv->vbios.dcb; 305 struct dcb_table *dcb = &drm->vbios.dcb;
350 struct drm_connector *connector, *ct; 306 struct drm_connector *connector, *ct;
351 struct nv50_display *priv; 307 struct nv50_display *priv;
352 int ret, i; 308 int ret, i;
353 309
354 NV_DEBUG_KMS(dev, "\n"); 310 NV_DEBUG(drm, "\n");
355 311
356 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 312 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
357 if (!priv) 313 if (!priv)
358 return -ENOMEM; 314 return -ENOMEM;
359 dev_priv->engine.display.priv = priv; 315
316 nouveau_display(dev)->priv = priv;
317 nouveau_display(dev)->dtor = nv50_display_destroy;
318 nouveau_display(dev)->init = nv50_display_init;
319 nouveau_display(dev)->fini = nv50_display_fini;
360 320
361 /* Create CRTC objects */ 321 /* Create CRTC objects */
362 for (i = 0; i < 2; i++) { 322 for (i = 0; i < 2; i++) {
@@ -367,10 +327,10 @@ nv50_display_create(struct drm_device *dev)
367 327
368 /* We setup the encoders from the BIOS table */ 328 /* We setup the encoders from the BIOS table */
369 for (i = 0 ; i < dcb->entries; i++) { 329 for (i = 0 ; i < dcb->entries; i++) {
370 struct dcb_entry *entry = &dcb->entry[i]; 330 struct dcb_output *entry = &dcb->entry[i];
371 331
372 if (entry->location != DCB_LOC_ON_CHIP) { 332 if (entry->location != DCB_LOC_ON_CHIP) {
373 NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n", 333 NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
374 entry->type, ffs(entry->or) - 1); 334 entry->type, ffs(entry->or) - 1);
375 continue; 335 continue;
376 } 336 }
@@ -380,16 +340,16 @@ nv50_display_create(struct drm_device *dev)
380 continue; 340 continue;
381 341
382 switch (entry->type) { 342 switch (entry->type) {
383 case OUTPUT_TMDS: 343 case DCB_OUTPUT_TMDS:
384 case OUTPUT_LVDS: 344 case DCB_OUTPUT_LVDS:
385 case OUTPUT_DP: 345 case DCB_OUTPUT_DP:
386 nv50_sor_create(connector, entry); 346 nv50_sor_create(connector, entry);
387 break; 347 break;
388 case OUTPUT_ANALOG: 348 case DCB_OUTPUT_ANALOG:
389 nv50_dac_create(connector, entry); 349 nv50_dac_create(connector, entry);
390 break; 350 break;
391 default: 351 default:
392 NV_WARN(dev, "DCB encoder %d unknown\n", entry->type); 352 NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
393 continue; 353 continue;
394 } 354 }
395 } 355 }
@@ -397,14 +357,13 @@ nv50_display_create(struct drm_device *dev)
397 list_for_each_entry_safe(connector, ct, 357 list_for_each_entry_safe(connector, ct,
398 &dev->mode_config.connector_list, head) { 358 &dev->mode_config.connector_list, head) {
399 if (!connector->encoder_ids[0]) { 359 if (!connector->encoder_ids[0]) {
400 NV_WARN(dev, "%s has no encoders, removing\n", 360 NV_WARN(drm, "%s has no encoders, removing\n",
401 drm_get_connector_name(connector)); 361 drm_get_connector_name(connector));
402 connector->funcs->destroy(connector); 362 connector->funcs->destroy(connector);
403 } 363 }
404 } 364 }
405 365
406 tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev); 366 tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
407 nouveau_irq_register(dev, 26, nv50_display_isr);
408 367
409 ret = nv50_evo_create(dev); 368 ret = nv50_evo_create(dev);
410 if (ret) { 369 if (ret) {
@@ -420,13 +379,16 @@ nv50_display_destroy(struct drm_device *dev)
420{ 379{
421 struct nv50_display *disp = nv50_display(dev); 380 struct nv50_display *disp = nv50_display(dev);
422 381
423 NV_DEBUG_KMS(dev, "\n");
424
425 nv50_evo_destroy(dev); 382 nv50_evo_destroy(dev);
426 nouveau_irq_unregister(dev, 26);
427 kfree(disp); 383 kfree(disp);
428} 384}
429 385
386struct nouveau_bo *
387nv50_display_crtc_sema(struct drm_device *dev, int crtc)
388{
389 return nv50_display(dev)->crtc[crtc].sem.bo;
390}
391
430void 392void
431nv50_display_flip_stop(struct drm_crtc *crtc) 393nv50_display_flip_stop(struct drm_crtc *crtc)
432{ 394{
@@ -457,7 +419,7 @@ int
457nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, 419nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
458 struct nouveau_channel *chan) 420 struct nouveau_channel *chan)
459{ 421{
460 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 422 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
461 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 423 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
462 struct nv50_display *disp = nv50_display(crtc->dev); 424 struct nv50_display *disp = nv50_display(crtc->dev);
463 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 425 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -477,7 +439,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
477 return ret; 439 return ret;
478 } 440 }
479 441
480 if (dev_priv->chipset < 0xc0) { 442 if (nv_device(drm->device)->chipset < 0xc0) {
481 BEGIN_NV04(chan, 0, 0x0060, 2); 443 BEGIN_NV04(chan, 0, 0x0060, 2);
482 OUT_RING (chan, NvEvoSema0 + nv_crtc->index); 444 OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
483 OUT_RING (chan, dispc->sem.offset); 445 OUT_RING (chan, dispc->sem.offset);
@@ -487,12 +449,12 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
487 OUT_RING (chan, dispc->sem.offset ^ 0x10); 449 OUT_RING (chan, dispc->sem.offset ^ 0x10);
488 OUT_RING (chan, 0x74b1e000); 450 OUT_RING (chan, 0x74b1e000);
489 BEGIN_NV04(chan, 0, 0x0060, 1); 451 BEGIN_NV04(chan, 0, 0x0060, 1);
490 if (dev_priv->chipset < 0x84) 452 if (nv_device(drm->device)->chipset < 0x84)
491 OUT_RING (chan, NvSema); 453 OUT_RING (chan, NvSema);
492 else 454 else
493 OUT_RING (chan, chan->vram_handle); 455 OUT_RING (chan, chan->vram);
494 } else { 456 } else {
495 u64 offset = nvc0_software_crtc(chan, nv_crtc->index); 457 u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
496 offset += dispc->sem.offset; 458 offset += dispc->sem.offset;
497 BEGIN_NVC0(chan, 0, 0x0010, 4); 459 BEGIN_NVC0(chan, 0, 0x0010, 4);
498 OUT_RING (chan, upper_32_bits(offset)); 460 OUT_RING (chan, upper_32_bits(offset));
@@ -555,13 +517,13 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
555} 517}
556 518
557static u16 519static u16
558nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb, 520nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
559 u32 mc, int pxclk) 521 u32 mc, int pxclk)
560{ 522{
561 struct drm_nouveau_private *dev_priv = dev->dev_private; 523 struct nouveau_drm *drm = nouveau_drm(dev);
562 struct nouveau_connector *nv_connector = NULL; 524 struct nouveau_connector *nv_connector = NULL;
563 struct drm_encoder *encoder; 525 struct drm_encoder *encoder;
564 struct nvbios *bios = &dev_priv->vbios; 526 struct nvbios *bios = &drm->vbios;
565 u32 script = 0, or; 527 u32 script = 0, or;
566 528
567 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 529 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -576,7 +538,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
576 538
577 or = ffs(dcb->or) - 1; 539 or = ffs(dcb->or) - 1;
578 switch (dcb->type) { 540 switch (dcb->type) {
579 case OUTPUT_LVDS: 541 case DCB_OUTPUT_LVDS:
580 script = (mc >> 8) & 0xf; 542 script = (mc >> 8) & 0xf;
581 if (bios->fp_no_ddc) { 543 if (bios->fp_no_ddc) {
582 if (bios->fp.dual_link) 544 if (bios->fp.dual_link)
@@ -609,34 +571,20 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
609 (nv_connector->edid->input & 0x70) >= 0x20) 571 (nv_connector->edid->input & 0x70) >= 0x20)
610 script |= 0x0200; 572 script |= 0x0200;
611 } 573 }
612
613 if (nouveau_uscript_lvds >= 0) {
614 NV_INFO(dev, "override script 0x%04x with 0x%04x "
615 "for output LVDS-%d\n", script,
616 nouveau_uscript_lvds, or);
617 script = nouveau_uscript_lvds;
618 }
619 break; 574 break;
620 case OUTPUT_TMDS: 575 case DCB_OUTPUT_TMDS:
621 script = (mc >> 8) & 0xf; 576 script = (mc >> 8) & 0xf;
622 if (pxclk >= 165000) 577 if (pxclk >= 165000)
623 script |= 0x0100; 578 script |= 0x0100;
624
625 if (nouveau_uscript_tmds >= 0) {
626 NV_INFO(dev, "override script 0x%04x with 0x%04x "
627 "for output TMDS-%d\n", script,
628 nouveau_uscript_tmds, or);
629 script = nouveau_uscript_tmds;
630 }
631 break; 579 break;
632 case OUTPUT_DP: 580 case DCB_OUTPUT_DP:
633 script = (mc >> 8) & 0xf; 581 script = (mc >> 8) & 0xf;
634 break; 582 break;
635 case OUTPUT_ANALOG: 583 case DCB_OUTPUT_ANALOG:
636 script = 0xff; 584 script = 0xff;
637 break; 585 break;
638 default: 586 default:
639 NV_ERROR(dev, "modeset on unsupported output type!\n"); 587 NV_ERROR(drm, "modeset on unsupported output type!\n");
640 break; 588 break;
641 } 589 }
642 590
@@ -644,59 +592,18 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
644} 592}
645 593
646static void 594static void
647nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
648{
649 struct drm_nouveau_private *dev_priv = dev->dev_private;
650 struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
651 struct nouveau_software_chan *pch, *tmp;
652
653 list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
654 if (pch->vblank.head != crtc)
655 continue;
656
657 spin_lock(&psw->peephole_lock);
658 nv_wr32(dev, 0x001704, pch->vblank.channel);
659 nv_wr32(dev, 0x001710, 0x80000000 | pch->vblank.ctxdma);
660 if (dev_priv->chipset == 0x50) {
661 nv_wr32(dev, 0x001570, pch->vblank.offset);
662 nv_wr32(dev, 0x001574, pch->vblank.value);
663 } else {
664 nv_wr32(dev, 0x060010, pch->vblank.offset);
665 nv_wr32(dev, 0x060014, pch->vblank.value);
666 }
667 spin_unlock(&psw->peephole_lock);
668
669 list_del(&pch->vblank.list);
670 drm_vblank_put(dev, crtc);
671 }
672
673 drm_handle_vblank(dev, crtc);
674}
675
676static void
677nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
678{
679 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
680 nv50_display_vblank_crtc_handler(dev, 0);
681
682 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
683 nv50_display_vblank_crtc_handler(dev, 1);
684
685 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
686}
687
688static void
689nv50_display_unk10_handler(struct drm_device *dev) 595nv50_display_unk10_handler(struct drm_device *dev)
690{ 596{
691 struct drm_nouveau_private *dev_priv = dev->dev_private; 597 struct nouveau_device *device = nouveau_dev(dev);
598 struct nouveau_drm *drm = nouveau_drm(dev);
692 struct nv50_display *disp = nv50_display(dev); 599 struct nv50_display *disp = nv50_display(dev);
693 u32 unk30 = nv_rd32(dev, 0x610030), mc; 600 u32 unk30 = nv_rd32(device, 0x610030), mc;
694 int i, crtc, or = 0, type = OUTPUT_ANY; 601 int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
695 602
696 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 603 NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
697 disp->irq.dcb = NULL; 604 disp->irq.dcb = NULL;
698 605
699 nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8); 606 nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
700 607
701 /* Determine which CRTC we're dealing with, only 1 ever will be 608 /* Determine which CRTC we're dealing with, only 1 ever will be
702 * signalled at the same time with the current nouveau code. 609 * signalled at the same time with the current nouveau code.
@@ -711,44 +618,44 @@ nv50_display_unk10_handler(struct drm_device *dev)
711 goto ack; 618 goto ack;
712 619
713 /* Find which encoder was connected to the CRTC */ 620 /* Find which encoder was connected to the CRTC */
714 for (i = 0; type == OUTPUT_ANY && i < 3; i++) { 621 for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
715 mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); 622 mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
716 NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc); 623 NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
717 if (!(mc & (1 << crtc))) 624 if (!(mc & (1 << crtc)))
718 continue; 625 continue;
719 626
720 switch ((mc & 0x00000f00) >> 8) { 627 switch ((mc & 0x00000f00) >> 8) {
721 case 0: type = OUTPUT_ANALOG; break; 628 case 0: type = DCB_OUTPUT_ANALOG; break;
722 case 1: type = OUTPUT_TV; break; 629 case 1: type = DCB_OUTPUT_TV; break;
723 default: 630 default:
724 NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc); 631 NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
725 goto ack; 632 goto ack;
726 } 633 }
727 634
728 or = i; 635 or = i;
729 } 636 }
730 637
731 for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { 638 for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
732 if (dev_priv->chipset < 0x90 || 639 if (nv_device(drm->device)->chipset < 0x90 ||
733 dev_priv->chipset == 0x92 || 640 nv_device(drm->device)->chipset == 0x92 ||
734 dev_priv->chipset == 0xa0) 641 nv_device(drm->device)->chipset == 0xa0)
735 mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); 642 mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
736 else 643 else
737 mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); 644 mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
738 645
739 NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc); 646 NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
740 if (!(mc & (1 << crtc))) 647 if (!(mc & (1 << crtc)))
741 continue; 648 continue;
742 649
743 switch ((mc & 0x00000f00) >> 8) { 650 switch ((mc & 0x00000f00) >> 8) {
744 case 0: type = OUTPUT_LVDS; break; 651 case 0: type = DCB_OUTPUT_LVDS; break;
745 case 1: type = OUTPUT_TMDS; break; 652 case 1: type = DCB_OUTPUT_TMDS; break;
746 case 2: type = OUTPUT_TMDS; break; 653 case 2: type = DCB_OUTPUT_TMDS; break;
747 case 5: type = OUTPUT_TMDS; break; 654 case 5: type = DCB_OUTPUT_TMDS; break;
748 case 8: type = OUTPUT_DP; break; 655 case 8: type = DCB_OUTPUT_DP; break;
749 case 9: type = OUTPUT_DP; break; 656 case 9: type = DCB_OUTPUT_DP; break;
750 default: 657 default:
751 NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc); 658 NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
752 goto ack; 659 goto ack;
753 } 660 }
754 661
@@ -756,12 +663,12 @@ nv50_display_unk10_handler(struct drm_device *dev)
756 } 663 }
757 664
758 /* There was no encoder to disable */ 665 /* There was no encoder to disable */
759 if (type == OUTPUT_ANY) 666 if (type == DCB_OUTPUT_ANY)
760 goto ack; 667 goto ack;
761 668
762 /* Disable the encoder */ 669 /* Disable the encoder */
763 for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { 670 for (i = 0; i < drm->vbios.dcb.entries; i++) {
764 struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; 671 struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
765 672
766 if (dcb->type == type && (dcb->or & (1 << or))) { 673 if (dcb->type == type && (dcb->or & (1 << or))) {
767 nouveau_bios_run_display_table(dev, 0, -1, dcb, -1); 674 nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
@@ -770,22 +677,23 @@ nv50_display_unk10_handler(struct drm_device *dev)
770 } 677 }
771 } 678 }
772 679
773 NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc); 680 NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
774ack: 681ack:
775 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10); 682 nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
776 nv_wr32(dev, 0x610030, 0x80000000); 683 nv_wr32(device, 0x610030, 0x80000000);
777} 684}
778 685
779static void 686static void
780nv50_display_unk20_handler(struct drm_device *dev) 687nv50_display_unk20_handler(struct drm_device *dev)
781{ 688{
782 struct drm_nouveau_private *dev_priv = dev->dev_private; 689 struct nouveau_device *device = nouveau_dev(dev);
690 struct nouveau_drm *drm = nouveau_drm(dev);
783 struct nv50_display *disp = nv50_display(dev); 691 struct nv50_display *disp = nv50_display(dev);
784 u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0; 692 u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
785 struct dcb_entry *dcb; 693 struct dcb_output *dcb;
786 int i, crtc, or = 0, type = OUTPUT_ANY; 694 int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
787 695
788 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 696 NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
789 dcb = disp->irq.dcb; 697 dcb = disp->irq.dcb;
790 if (dcb) { 698 if (dcb) {
791 nouveau_bios_run_display_table(dev, 0, -2, dcb, -1); 699 nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
@@ -795,86 +703,86 @@ nv50_display_unk20_handler(struct drm_device *dev)
795 /* CRTC clock change requested? */ 703 /* CRTC clock change requested? */
796 crtc = ffs((unk30 & 0x00000600) >> 9) - 1; 704 crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
797 if (crtc >= 0) { 705 if (crtc >= 0) {
798 pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)); 706 pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
799 pclk &= 0x003fffff; 707 pclk &= 0x003fffff;
800 if (pclk) 708 if (pclk)
801 nv50_crtc_set_clock(dev, crtc, pclk); 709 nv50_crtc_set_clock(dev, crtc, pclk);
802 710
803 tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc)); 711 tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
804 tmp &= ~0x000000f; 712 tmp &= ~0x000000f;
805 nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp); 713 nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
806 } 714 }
807 715
808 /* Nothing needs to be done for the encoder */ 716 /* Nothing needs to be done for the encoder */
809 crtc = ffs((unk30 & 0x00000180) >> 7) - 1; 717 crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
810 if (crtc < 0) 718 if (crtc < 0)
811 goto ack; 719 goto ack;
812 pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff; 720 pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
813 721
814 /* Find which encoder is connected to the CRTC */ 722 /* Find which encoder is connected to the CRTC */
815 for (i = 0; type == OUTPUT_ANY && i < 3; i++) { 723 for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
816 mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(i)); 724 mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
817 NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc); 725 NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
818 if (!(mc & (1 << crtc))) 726 if (!(mc & (1 << crtc)))
819 continue; 727 continue;
820 728
821 switch ((mc & 0x00000f00) >> 8) { 729 switch ((mc & 0x00000f00) >> 8) {
822 case 0: type = OUTPUT_ANALOG; break; 730 case 0: type = DCB_OUTPUT_ANALOG; break;
823 case 1: type = OUTPUT_TV; break; 731 case 1: type = DCB_OUTPUT_TV; break;
824 default: 732 default:
825 NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc); 733 NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
826 goto ack; 734 goto ack;
827 } 735 }
828 736
829 or = i; 737 or = i;
830 } 738 }
831 739
832 for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { 740 for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
833 if (dev_priv->chipset < 0x90 || 741 if (nv_device(drm->device)->chipset < 0x90 ||
834 dev_priv->chipset == 0x92 || 742 nv_device(drm->device)->chipset == 0x92 ||
835 dev_priv->chipset == 0xa0) 743 nv_device(drm->device)->chipset == 0xa0)
836 mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(i)); 744 mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
837 else 745 else
838 mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(i)); 746 mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
839 747
840 NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc); 748 NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
841 if (!(mc & (1 << crtc))) 749 if (!(mc & (1 << crtc)))
842 continue; 750 continue;
843 751
844 switch ((mc & 0x00000f00) >> 8) { 752 switch ((mc & 0x00000f00) >> 8) {
845 case 0: type = OUTPUT_LVDS; break; 753 case 0: type = DCB_OUTPUT_LVDS; break;
846 case 1: type = OUTPUT_TMDS; break; 754 case 1: type = DCB_OUTPUT_TMDS; break;
847 case 2: type = OUTPUT_TMDS; break; 755 case 2: type = DCB_OUTPUT_TMDS; break;
848 case 5: type = OUTPUT_TMDS; break; 756 case 5: type = DCB_OUTPUT_TMDS; break;
849 case 8: type = OUTPUT_DP; break; 757 case 8: type = DCB_OUTPUT_DP; break;
850 case 9: type = OUTPUT_DP; break; 758 case 9: type = DCB_OUTPUT_DP; break;
851 default: 759 default:
852 NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc); 760 NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
853 goto ack; 761 goto ack;
854 } 762 }
855 763
856 or = i; 764 or = i;
857 } 765 }
858 766
859 if (type == OUTPUT_ANY) 767 if (type == DCB_OUTPUT_ANY)
860 goto ack; 768 goto ack;
861 769
862 /* Enable the encoder */ 770 /* Enable the encoder */
863 for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { 771 for (i = 0; i < drm->vbios.dcb.entries; i++) {
864 dcb = &dev_priv->vbios.dcb.entry[i]; 772 dcb = &drm->vbios.dcb.entry[i];
865 if (dcb->type == type && (dcb->or & (1 << or))) 773 if (dcb->type == type && (dcb->or & (1 << or)))
866 break; 774 break;
867 } 775 }
868 776
869 if (i == dev_priv->vbios.dcb.entries) { 777 if (i == drm->vbios.dcb.entries) {
870 NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc); 778 NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
871 goto ack; 779 goto ack;
872 } 780 }
873 781
874 script = nv50_display_script_select(dev, dcb, mc, pclk); 782 script = nv50_display_script_select(dev, dcb, mc, pclk);
875 nouveau_bios_run_display_table(dev, script, pclk, dcb, -1); 783 nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
876 784
877 if (type == OUTPUT_DP) { 785 if (type == DCB_OUTPUT_DP) {
878 int link = !(dcb->dpconf.sor.link & 1); 786 int link = !(dcb->dpconf.sor.link & 1);
879 if ((mc & 0x000f0000) == 0x00020000) 787 if ((mc & 0x000f0000) == 0x00020000)
880 nv50_sor_dp_calc_tu(dev, or, link, pclk, 18); 788 nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
@@ -882,14 +790,14 @@ nv50_display_unk20_handler(struct drm_device *dev)
882 nv50_sor_dp_calc_tu(dev, or, link, pclk, 24); 790 nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
883 } 791 }
884 792
885 if (dcb->type != OUTPUT_ANALOG) { 793 if (dcb->type != DCB_OUTPUT_ANALOG) {
886 tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); 794 tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
887 tmp &= ~0x00000f0f; 795 tmp &= ~0x00000f0f;
888 if (script & 0x0100) 796 if (script & 0x0100)
889 tmp |= 0x00000101; 797 tmp |= 0x00000101;
890 nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp); 798 nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
891 } else { 799 } else {
892 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0); 800 nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
893 } 801 }
894 802
895 disp->irq.dcb = dcb; 803 disp->irq.dcb = dcb;
@@ -897,8 +805,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
897 disp->irq.script = script; 805 disp->irq.script = script;
898 806
899ack: 807ack:
900 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20); 808 nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
901 nv_wr32(dev, 0x610030, 0x80000000); 809 nv_wr32(device, 0x610030, 0x80000000);
902} 810}
903 811
904/* If programming a TMDS output on a SOR that can also be configured for 812/* If programming a TMDS output on a SOR that can also be configured for
@@ -910,23 +818,24 @@ ack:
910 * programmed for DisplayPort. 818 * programmed for DisplayPort.
911 */ 819 */
912static void 820static void
913nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb) 821nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb)
914{ 822{
823 struct nouveau_device *device = nouveau_dev(dev);
915 int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); 824 int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
916 struct drm_encoder *encoder; 825 struct drm_encoder *encoder;
917 u32 tmp; 826 u32 tmp;
918 827
919 if (dcb->type != OUTPUT_TMDS) 828 if (dcb->type != DCB_OUTPUT_TMDS)
920 return; 829 return;
921 830
922 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 831 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
923 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 832 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
924 833
925 if (nv_encoder->dcb->type == OUTPUT_DP && 834 if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
926 nv_encoder->dcb->or & (1 << or)) { 835 nv_encoder->dcb->or & (1 << or)) {
927 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); 836 tmp = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
928 tmp &= ~NV50_SOR_DP_CTRL_ENABLED; 837 tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
929 nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp); 838 nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
930 break; 839 break;
931 } 840 }
932 } 841 }
@@ -935,12 +844,14 @@ nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
935static void 844static void
936nv50_display_unk40_handler(struct drm_device *dev) 845nv50_display_unk40_handler(struct drm_device *dev)
937{ 846{
847 struct nouveau_device *device = nouveau_dev(dev);
848 struct nouveau_drm *drm = nouveau_drm(dev);
938 struct nv50_display *disp = nv50_display(dev); 849 struct nv50_display *disp = nv50_display(dev);
939 struct dcb_entry *dcb = disp->irq.dcb; 850 struct dcb_output *dcb = disp->irq.dcb;
940 u16 script = disp->irq.script; 851 u16 script = disp->irq.script;
941 u32 unk30 = nv_rd32(dev, 0x610030), pclk = disp->irq.pclk; 852 u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
942 853
943 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 854 NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
944 disp->irq.dcb = NULL; 855 disp->irq.dcb = NULL;
945 if (!dcb) 856 if (!dcb)
946 goto ack; 857 goto ack;
@@ -949,21 +860,23 @@ nv50_display_unk40_handler(struct drm_device *dev)
949 nv50_display_unk40_dp_set_tmds(dev, dcb); 860 nv50_display_unk40_dp_set_tmds(dev, dcb);
950 861
951ack: 862ack:
952 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40); 863 nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
953 nv_wr32(dev, 0x610030, 0x80000000); 864 nv_wr32(device, 0x610030, 0x80000000);
954 nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8); 865 nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
955} 866}
956 867
957static void 868static void
958nv50_display_bh(unsigned long data) 869nv50_display_bh(unsigned long data)
959{ 870{
960 struct drm_device *dev = (struct drm_device *)data; 871 struct drm_device *dev = (struct drm_device *)data;
872 struct nouveau_device *device = nouveau_dev(dev);
873 struct nouveau_drm *drm = nouveau_drm(dev);
961 874
962 for (;;) { 875 for (;;) {
963 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); 876 uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
964 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); 877 uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
965 878
966 NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1); 879 NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
967 880
968 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10) 881 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
969 nv50_display_unk10_handler(dev); 882 nv50_display_unk10_handler(dev);
@@ -977,13 +890,15 @@ nv50_display_bh(unsigned long data)
977 break; 890 break;
978 } 891 }
979 892
980 nv_wr32(dev, NV03_PMC_INTR_EN_0, 1); 893 nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
981} 894}
982 895
983static void 896static void
984nv50_display_error_handler(struct drm_device *dev) 897nv50_display_error_handler(struct drm_device *dev)
985{ 898{
986 u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16; 899 struct nouveau_device *device = nouveau_dev(dev);
900 struct nouveau_drm *drm = nouveau_drm(dev);
901 u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
987 u32 addr, data; 902 u32 addr, data;
988 int chid; 903 int chid;
989 904
@@ -991,29 +906,31 @@ nv50_display_error_handler(struct drm_device *dev)
991 if (!(channels & (1 << chid))) 906 if (!(channels & (1 << chid)))
992 continue; 907 continue;
993 908
994 nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid); 909 nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
995 addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid)); 910 addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
996 data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid)); 911 data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
997 NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x " 912 NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
998 "(0x%04x 0x%02x)\n", chid, 913 "(0x%04x 0x%02x)\n", chid,
999 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); 914 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
1000 915
1001 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000); 916 nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
1002 } 917 }
1003} 918}
1004 919
1005static void 920void
1006nv50_display_isr(struct drm_device *dev) 921nv50_display_intr(struct drm_device *dev)
1007{ 922{
923 struct nouveau_device *device = nouveau_dev(dev);
924 struct nouveau_drm *drm = nouveau_drm(dev);
1008 struct nv50_display *disp = nv50_display(dev); 925 struct nv50_display *disp = nv50_display(dev);
1009 uint32_t delayed = 0; 926 uint32_t delayed = 0;
1010 927
1011 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { 928 while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
1012 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); 929 uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
1013 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); 930 uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
1014 uint32_t clock; 931 uint32_t clock;
1015 932
1016 NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1); 933 NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
1017 934
1018 if (!intr0 && !(intr1 & ~delayed)) 935 if (!intr0 && !(intr1 & ~delayed))
1019 break; 936 break;
@@ -1024,29 +941,29 @@ nv50_display_isr(struct drm_device *dev)
1024 } 941 }
1025 942
1026 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { 943 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
1027 nv50_display_vblank_handler(dev, intr1);
1028 intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC; 944 intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
945 delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
1029 } 946 }
1030 947
1031 clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 | 948 clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
1032 NV50_PDISPLAY_INTR_1_CLK_UNK20 | 949 NV50_PDISPLAY_INTR_1_CLK_UNK20 |
1033 NV50_PDISPLAY_INTR_1_CLK_UNK40)); 950 NV50_PDISPLAY_INTR_1_CLK_UNK40));
1034 if (clock) { 951 if (clock) {
1035 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); 952 nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
1036 tasklet_schedule(&disp->tasklet); 953 tasklet_schedule(&disp->tasklet);
1037 delayed |= clock; 954 delayed |= clock;
1038 intr1 &= ~clock; 955 intr1 &= ~clock;
1039 } 956 }
1040 957
1041 if (intr0) { 958 if (intr0) {
1042 NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0); 959 NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
1043 nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0); 960 nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
1044 } 961 }
1045 962
1046 if (intr1) { 963 if (intr1) {
1047 NV_ERROR(dev, 964 NV_ERROR(drm,
1048 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1); 965 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
1049 nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1); 966 nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
1050 } 967 }
1051 } 968 }
1052} 969}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index e9db9b97f041..973554d8a7a6 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -27,13 +27,9 @@
27#ifndef __NV50_DISPLAY_H__ 27#ifndef __NV50_DISPLAY_H__
28#define __NV50_DISPLAY_H__ 28#define __NV50_DISPLAY_H__
29 29
30#include "drmP.h" 30#include "nouveau_display.h"
31#include "drm.h"
32#include "nouveau_drv.h"
33#include "nouveau_dma.h"
34#include "nouveau_reg.h"
35#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
36#include "nouveau_software.h" 32#include "nouveau_reg.h"
37#include "nv50_evo.h" 33#include "nv50_evo.h"
38 34
39struct nv50_display_crtc { 35struct nv50_display_crtc {
@@ -47,13 +43,16 @@ struct nv50_display_crtc {
47 43
48struct nv50_display { 44struct nv50_display {
49 struct nouveau_channel *master; 45 struct nouveau_channel *master;
50 struct nouveau_gpuobj *ntfy; 46
47 struct nouveau_gpuobj *ramin;
48 u32 dmao;
49 u32 hash;
51 50
52 struct nv50_display_crtc crtc[2]; 51 struct nv50_display_crtc crtc[2];
53 52
54 struct tasklet_struct tasklet; 53 struct tasklet_struct tasklet;
55 struct { 54 struct {
56 struct dcb_entry *dcb; 55 struct dcb_output *dcb;
57 u16 script; 56 u16 script;
58 u32 pclk; 57 u32 pclk;
59 } irq; 58 } irq;
@@ -62,8 +61,7 @@ struct nv50_display {
62static inline struct nv50_display * 61static inline struct nv50_display *
63nv50_display(struct drm_device *dev) 62nv50_display(struct drm_device *dev)
64{ 63{
65 struct drm_nouveau_private *dev_priv = dev->dev_private; 64 return nouveau_display(dev)->priv;
66 return dev_priv->engine.display.priv;
67} 65}
68 66
69int nv50_display_early_init(struct drm_device *dev); 67int nv50_display_early_init(struct drm_device *dev);
@@ -72,6 +70,7 @@ int nv50_display_create(struct drm_device *dev);
72int nv50_display_init(struct drm_device *dev); 70int nv50_display_init(struct drm_device *dev);
73void nv50_display_fini(struct drm_device *dev); 71void nv50_display_fini(struct drm_device *dev);
74void nv50_display_destroy(struct drm_device *dev); 72void nv50_display_destroy(struct drm_device *dev);
73void nv50_display_intr(struct drm_device *);
75int nv50_crtc_blank(struct nouveau_crtc *, bool blank); 74int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
76int nv50_crtc_set_clock(struct drm_device *, int head, int pclk); 75int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
77 76
@@ -91,4 +90,17 @@ void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
91int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype, 90int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
92 u64 base, u64 size, struct nouveau_gpuobj **); 91 u64 base, u64 size, struct nouveau_gpuobj **);
93 92
93int nvd0_display_create(struct drm_device *);
94void nvd0_display_destroy(struct drm_device *);
95int nvd0_display_init(struct drm_device *);
96void nvd0_display_fini(struct drm_device *);
97void nvd0_display_intr(struct drm_device *);
98
99void nvd0_display_flip_stop(struct drm_crtc *);
100int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
101 struct nouveau_channel *, u32 swap_interval);
102
103struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
104struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
105
94#endif /* __NV50_DISPLAY_H__ */ 106#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index ddcd55595824..0f534160c021 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -24,11 +24,29 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drm.h"
28#include "nouveau_dma.h" 28#include "nouveau_dma.h"
29#include "nouveau_ramht.h"
30#include "nv50_display.h" 29#include "nv50_display.h"
31 30
31#include <core/gpuobj.h>
32
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35
36static u32
37nv50_evo_rd32(struct nouveau_object *object, u32 addr)
38{
39 void __iomem *iomem = object->oclass->ofuncs->rd08;
40 return ioread32_native(iomem + addr);
41}
42
43static void
44nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
45{
46 void __iomem *iomem = object->oclass->ofuncs->rd08;
47 iowrite32_native(data, iomem + addr);
48}
49
32static void 50static void
33nv50_evo_channel_del(struct nouveau_channel **pevo) 51nv50_evo_channel_del(struct nouveau_channel **pevo)
34{ 52{
@@ -38,26 +56,29 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
38 return; 56 return;
39 *pevo = NULL; 57 *pevo = NULL;
40 58
41 nouveau_ramht_ref(NULL, &evo->ramht, evo); 59 nouveau_bo_unmap(evo->push.buffer);
42 nouveau_gpuobj_channel_takedown(evo); 60 nouveau_bo_ref(NULL, &evo->push.buffer);
43 nouveau_bo_unmap(evo->pushbuf_bo);
44 nouveau_bo_ref(NULL, &evo->pushbuf_bo);
45 61
46 if (evo->user) 62 if (evo->object)
47 iounmap(evo->user); 63 iounmap(evo->object->oclass->ofuncs);
48 64
49 kfree(evo); 65 kfree(evo);
50} 66}
51 67
52void 68int
53nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size) 69nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
70 u64 base, u64 size, struct nouveau_gpuobj **pobj)
54{ 71{
55 struct drm_nouveau_private *dev_priv = obj->dev->dev_private; 72 struct drm_device *dev = evo->fence;
73 struct nouveau_drm *drm = nouveau_drm(dev);
74 struct nv50_display *disp = nv50_display(dev);
75 u32 dmao = disp->dmao;
76 u32 hash = disp->hash;
56 u32 flags5; 77 u32 flags5;
57 78
58 if (dev_priv->chipset < 0xc0) { 79 if (nv_device(drm->device)->chipset < 0xc0) {
59 /* not supported on 0x50, specified in format mthd */ 80 /* not supported on 0x50, specified in format mthd */
60 if (dev_priv->chipset == 0x50) 81 if (nv_device(drm->device)->chipset == 0x50)
61 memtype = 0; 82 memtype = 0;
62 flags5 = 0x00010000; 83 flags5 = 0x00010000;
63 } else { 84 } else {
@@ -67,42 +88,28 @@ nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size
67 flags5 = 0x00020000; 88 flags5 = 0x00020000;
68 } 89 }
69 90
70 nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM, 91 nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
71 NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0); 92 nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
72 nv_wo32(obj, 0x14, flags5); 93 nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
73 dev_priv->engine.instmem.flush(obj->dev); 94 nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
74} 95 upper_32_bits(base));
96 nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
97 nv_wo32(disp->ramin, dmao + 0x14, flags5);
75 98
76int 99 nv_wo32(disp->ramin, hash + 0x00, handle);
77nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype, 100 nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
78 u64 base, u64 size, struct nouveau_gpuobj **pobj) 101 evo->handle);
79{
80 struct nv50_display *disp = nv50_display(evo->dev);
81 struct nouveau_gpuobj *obj = NULL;
82 int ret;
83
84 ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
85 if (ret)
86 return ret;
87 obj->engine = NVOBJ_ENGINE_DISPLAY;
88
89 nv50_evo_dmaobj_init(obj, memtype, base, size);
90
91 ret = nouveau_ramht_insert(evo, handle, obj);
92 if (ret)
93 goto out;
94 102
95 if (pobj) 103 disp->dmao += 0x20;
96 nouveau_gpuobj_ref(obj, pobj); 104 disp->hash += 0x08;
97out: 105 return 0;
98 nouveau_gpuobj_ref(NULL, &obj);
99 return ret;
100} 106}
101 107
102static int 108static int
103nv50_evo_channel_new(struct drm_device *dev, int chid, 109nv50_evo_channel_new(struct drm_device *dev, int chid,
104 struct nouveau_channel **pevo) 110 struct nouveau_channel **pevo)
105{ 111{
112 struct nouveau_drm *drm = nouveau_drm(dev);
106 struct nv50_display *disp = nv50_display(dev); 113 struct nv50_display *disp = nv50_display(dev);
107 struct nouveau_channel *evo; 114 struct nouveau_channel *evo;
108 int ret; 115 int ret;
@@ -112,79 +119,84 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
112 return -ENOMEM; 119 return -ENOMEM;
113 *pevo = evo; 120 *pevo = evo;
114 121
115 evo->id = chid; 122 evo->drm = drm;
116 evo->dev = dev; 123 evo->handle = chid;
124 evo->fence = dev;
117 evo->user_get = 4; 125 evo->user_get = 4;
118 evo->user_put = 0; 126 evo->user_put = 0;
119 127
120 ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL, 128 ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
121 &evo->pushbuf_bo); 129 &evo->push.buffer);
122 if (ret == 0) 130 if (ret == 0)
123 ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); 131 ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
124 if (ret) { 132 if (ret) {
125 NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); 133 NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
126 nv50_evo_channel_del(pevo); 134 nv50_evo_channel_del(pevo);
127 return ret; 135 return ret;
128 } 136 }
129 137
130 ret = nouveau_bo_map(evo->pushbuf_bo); 138 ret = nouveau_bo_map(evo->push.buffer);
131 if (ret) { 139 if (ret) {
132 NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); 140 NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
133 nv50_evo_channel_del(pevo); 141 nv50_evo_channel_del(pevo);
134 return ret; 142 return ret;
135 } 143 }
136 144
137 evo->user = ioremap(pci_resource_start(dev->pdev, 0) + 145 evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
138 NV50_PDISPLAY_USER(evo->id), PAGE_SIZE); 146#ifdef NOUVEAU_OBJECT_MAGIC
139 if (!evo->user) { 147 evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
140 NV_ERROR(dev, "Error mapping EVO control regs.\n"); 148#endif
141 nv50_evo_channel_del(pevo); 149 evo->object->parent = nv_object(disp->ramin)->parent;
142 return -ENOMEM; 150 evo->object->engine = nv_object(disp->ramin)->engine;
143 } 151 evo->object->oclass =
144 152 kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
145 /* bind primary evo channel's ramht to the channel */ 153 evo->object->oclass->ofuncs =
146 if (disp->master && evo != disp->master) 154 kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
147 nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL); 155 evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
148 156 evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
157 evo->object->oclass->ofuncs->rd08 =
158 ioremap(pci_resource_start(dev->pdev, 0) +
159 NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
149 return 0; 160 return 0;
150} 161}
151 162
152static int 163static int
153nv50_evo_channel_init(struct nouveau_channel *evo) 164nv50_evo_channel_init(struct nouveau_channel *evo)
154{ 165{
155 struct drm_device *dev = evo->dev; 166 struct nouveau_drm *drm = evo->drm;
156 int id = evo->id, ret, i; 167 struct nouveau_device *device = nv_device(drm->device);
157 u64 pushbuf = evo->pushbuf_bo->bo.offset; 168 int id = evo->handle, ret, i;
169 u64 pushbuf = evo->push.buffer->bo.offset;
158 u32 tmp; 170 u32 tmp;
159 171
160 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); 172 tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
161 if ((tmp & 0x009f0000) == 0x00020000) 173 if ((tmp & 0x009f0000) == 0x00020000)
162 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000); 174 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
163 175
164 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); 176 tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
165 if ((tmp & 0x003f0000) == 0x00030000) 177 if ((tmp & 0x003f0000) == 0x00030000)
166 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000); 178 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
167 179
168 /* initialise fifo */ 180 /* initialise fifo */
169 nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 | 181 nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
170 NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | 182 NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
171 NV50_PDISPLAY_EVO_DMA_CB_VALID); 183 NV50_PDISPLAY_EVO_DMA_CB_VALID);
172 nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); 184 nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
173 nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id); 185 nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
174 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA, 186 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
175 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); 187 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
176 188
177 nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000); 189 nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
178 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | 190 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
179 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); 191 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
180 if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { 192 if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
181 NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id, 193 NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
182 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); 194 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
183 return -EBUSY; 195 return -EBUSY;
184 } 196 }
185 197
186 /* enable error reporting on the channel */ 198 /* enable error reporting on the channel */
187 nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); 199 nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
188 200
189 evo->dma.max = (4096/4) - 2; 201 evo->dma.max = (4096/4) - 2;
190 evo->dma.max &= ~7; 202 evo->dma.max &= ~7;
@@ -205,16 +217,17 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
205static void 217static void
206nv50_evo_channel_fini(struct nouveau_channel *evo) 218nv50_evo_channel_fini(struct nouveau_channel *evo)
207{ 219{
208 struct drm_device *dev = evo->dev; 220 struct nouveau_drm *drm = evo->drm;
209 int id = evo->id; 221 struct nouveau_device *device = nv_device(drm->device);
210 222 int id = evo->handle;
211 nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000); 223
212 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); 224 nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
213 nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id)); 225 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
214 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000); 226 nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
215 if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) { 227 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
216 NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id, 228 if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
217 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); 229 NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
230 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
218 } 231 }
219} 232}
220 233
@@ -231,93 +244,66 @@ nv50_evo_destroy(struct drm_device *dev)
231 } 244 }
232 nv50_evo_channel_del(&disp->crtc[i].sync); 245 nv50_evo_channel_del(&disp->crtc[i].sync);
233 } 246 }
234 nouveau_gpuobj_ref(NULL, &disp->ntfy);
235 nv50_evo_channel_del(&disp->master); 247 nv50_evo_channel_del(&disp->master);
248 nouveau_gpuobj_ref(NULL, &disp->ramin);
236} 249}
237 250
238int 251int
239nv50_evo_create(struct drm_device *dev) 252nv50_evo_create(struct drm_device *dev)
240{ 253{
241 struct drm_nouveau_private *dev_priv = dev->dev_private; 254 struct nouveau_drm *drm = nouveau_drm(dev);
255 struct nouveau_fb *pfb = nouveau_fb(drm->device);
242 struct nv50_display *disp = nv50_display(dev); 256 struct nv50_display *disp = nv50_display(dev);
243 struct nouveau_gpuobj *ramht = NULL;
244 struct nouveau_channel *evo; 257 struct nouveau_channel *evo;
245 int ret, i, j; 258 int ret, i, j;
246 259
247 /* create primary evo channel, the one we use for modesetting
248 * purporses
249 */
250 ret = nv50_evo_channel_new(dev, 0, &disp->master);
251 if (ret)
252 return ret;
253 evo = disp->master;
254
255 /* setup object management on it, any other evo channel will 260 /* setup object management on it, any other evo channel will
256 * use this also as there's no per-channel support on the 261 * use this also as there's no per-channel support on the
257 * hardware 262 * hardware
258 */ 263 */
259 ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536, 264 ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
260 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); 265 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
261 if (ret) {
262 NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
263 goto err;
264 }
265
266 ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
267 if (ret) { 266 if (ret) {
268 NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); 267 NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
269 goto err; 268 goto err;
270 } 269 }
271 270
272 ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); 271 disp->hash = 0x0000;
273 if (ret) { 272 disp->dmao = 0x1000;
274 NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
275 goto err;
276 }
277
278 ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
279 nouveau_gpuobj_ref(NULL, &ramht);
280 if (ret)
281 goto err;
282 273
283 /* not sure exactly what this is.. 274 /* create primary evo channel, the one we use for modesetting
284 * 275 * purporses
285 * the first dword of the structure is used by nvidia to wait on
286 * full completion of an EVO "update" command.
287 *
288 * method 0x8c on the master evo channel will fill a lot more of
289 * this structure with some undefined info
290 */ 276 */
291 ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0, 277 ret = nv50_evo_channel_new(dev, 0, &disp->master);
292 NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
293 if (ret) 278 if (ret)
294 goto err; 279 return ret;
280 evo = disp->master;
295 281
296 ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000, 282 ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
297 disp->ntfy->vinst, disp->ntfy->size, NULL); 283 disp->ramin->addr + 0x2000, 0x1000, NULL);
298 if (ret) 284 if (ret)
299 goto err; 285 goto err;
300 286
301 /* create some default objects for the scanout memtypes we support */ 287 /* create some default objects for the scanout memtypes we support */
302 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000, 288 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
303 0, dev_priv->vram_size, NULL); 289 0, pfb->ram.size, NULL);
304 if (ret) 290 if (ret)
305 goto err; 291 goto err;
306 292
307 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000, 293 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
308 0, dev_priv->vram_size, NULL); 294 0, pfb->ram.size, NULL);
309 if (ret) 295 if (ret)
310 goto err; 296 goto err;
311 297
312 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 | 298 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
313 (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00), 299 (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
314 0, dev_priv->vram_size, NULL); 300 0, pfb->ram.size, NULL);
315 if (ret) 301 if (ret)
316 goto err; 302 goto err;
317 303
318 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 | 304 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
319 (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00), 305 (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
320 0, dev_priv->vram_size, NULL); 306 0, pfb->ram.size, NULL);
321 if (ret) 307 if (ret)
322 goto err; 308 goto err;
323 309
@@ -352,21 +338,21 @@ nv50_evo_create(struct drm_device *dev)
352 goto err; 338 goto err;
353 339
354 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000, 340 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
355 0, dev_priv->vram_size, NULL); 341 0, pfb->ram.size, NULL);
356 if (ret) 342 if (ret)
357 goto err; 343 goto err;
358 344
359 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 | 345 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
360 (dev_priv->chipset < 0xc0 ? 346 (nv_device(drm->device)->chipset < 0xc0 ?
361 0x7a00 : 0xfe00), 347 0x7a : 0xfe),
362 0, dev_priv->vram_size, NULL); 348 0, pfb->ram.size, NULL);
363 if (ret) 349 if (ret)
364 goto err; 350 goto err;
365 351
366 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 | 352 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
367 (dev_priv->chipset < 0xc0 ? 353 (nv_device(drm->device)->chipset < 0xc0 ?
368 0x7000 : 0xfe00), 354 0x70 : 0xfe),
369 0, dev_priv->vram_size, NULL); 355 0, pfb->ram.size, NULL);
370 if (ret) 356 if (ret)
371 goto err; 357 goto err;
372 358
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
deleted file mode 100644
index f1e4b9e07d14..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ /dev/null
@@ -1,296 +0,0 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5#include "nouveau_fifo.h"
6
7struct nv50_fb_priv {
8 struct page *r100c08_page;
9 dma_addr_t r100c08;
10};
11
12static void
13nv50_fb_destroy(struct drm_device *dev)
14{
15 struct drm_nouveau_private *dev_priv = dev->dev_private;
16 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
17 struct nv50_fb_priv *priv = pfb->priv;
18
19 if (drm_mm_initialized(&pfb->tag_heap))
20 drm_mm_takedown(&pfb->tag_heap);
21
22 if (priv->r100c08_page) {
23 pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
24 PCI_DMA_BIDIRECTIONAL);
25 __free_page(priv->r100c08_page);
26 }
27
28 kfree(priv);
29 pfb->priv = NULL;
30}
31
32static int
33nv50_fb_create(struct drm_device *dev)
34{
35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
37 struct nv50_fb_priv *priv;
38 u32 tagmem;
39 int ret;
40
41 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
42 if (!priv)
43 return -ENOMEM;
44 pfb->priv = priv;
45
46 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
47 if (!priv->r100c08_page) {
48 nv50_fb_destroy(dev);
49 return -ENOMEM;
50 }
51
52 priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
53 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
54 if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
55 nv50_fb_destroy(dev);
56 return -EFAULT;
57 }
58
59 tagmem = nv_rd32(dev, 0x100320);
60 NV_DEBUG(dev, "%d tags available\n", tagmem);
61 ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
62 if (ret) {
63 nv50_fb_destroy(dev);
64 return ret;
65 }
66
67 return 0;
68}
69
70int
71nv50_fb_init(struct drm_device *dev)
72{
73 struct drm_nouveau_private *dev_priv = dev->dev_private;
74 struct nv50_fb_priv *priv;
75 int ret;
76
77 if (!dev_priv->engine.fb.priv) {
78 ret = nv50_fb_create(dev);
79 if (ret)
80 return ret;
81 }
82 priv = dev_priv->engine.fb.priv;
83
84 /* Not a clue what this is exactly. Without pointing it at a
85 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
86 * cause IOMMU "read from address 0" errors (rh#561267)
87 */
88 nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
89
90 /* This is needed to get meaningful information from 100c90
91 * on traps. No idea what these values mean exactly. */
92 switch (dev_priv->chipset) {
93 case 0x50:
94 nv_wr32(dev, 0x100c90, 0x000707ff);
95 break;
96 case 0xa3:
97 case 0xa5:
98 case 0xa8:
99 nv_wr32(dev, 0x100c90, 0x000d0fff);
100 break;
101 case 0xaf:
102 nv_wr32(dev, 0x100c90, 0x089d1fff);
103 break;
104 default:
105 nv_wr32(dev, 0x100c90, 0x001d07ff);
106 break;
107 }
108
109 return 0;
110}
111
112void
113nv50_fb_takedown(struct drm_device *dev)
114{
115 nv50_fb_destroy(dev);
116}
117
118static struct nouveau_enum vm_dispatch_subclients[] = {
119 { 0x00000000, "GRCTX", NULL },
120 { 0x00000001, "NOTIFY", NULL },
121 { 0x00000002, "QUERY", NULL },
122 { 0x00000003, "COND", NULL },
123 { 0x00000004, "M2M_IN", NULL },
124 { 0x00000005, "M2M_OUT", NULL },
125 { 0x00000006, "M2M_NOTIFY", NULL },
126 {}
127};
128
129static struct nouveau_enum vm_ccache_subclients[] = {
130 { 0x00000000, "CB", NULL },
131 { 0x00000001, "TIC", NULL },
132 { 0x00000002, "TSC", NULL },
133 {}
134};
135
136static struct nouveau_enum vm_prop_subclients[] = {
137 { 0x00000000, "RT0", NULL },
138 { 0x00000001, "RT1", NULL },
139 { 0x00000002, "RT2", NULL },
140 { 0x00000003, "RT3", NULL },
141 { 0x00000004, "RT4", NULL },
142 { 0x00000005, "RT5", NULL },
143 { 0x00000006, "RT6", NULL },
144 { 0x00000007, "RT7", NULL },
145 { 0x00000008, "ZETA", NULL },
146 { 0x00000009, "LOCAL", NULL },
147 { 0x0000000a, "GLOBAL", NULL },
148 { 0x0000000b, "STACK", NULL },
149 { 0x0000000c, "DST2D", NULL },
150 {}
151};
152
153static struct nouveau_enum vm_pfifo_subclients[] = {
154 { 0x00000000, "PUSHBUF", NULL },
155 { 0x00000001, "SEMAPHORE", NULL },
156 {}
157};
158
159static struct nouveau_enum vm_bar_subclients[] = {
160 { 0x00000000, "FB", NULL },
161 { 0x00000001, "IN", NULL },
162 {}
163};
164
165static struct nouveau_enum vm_client[] = {
166 { 0x00000000, "STRMOUT", NULL },
167 { 0x00000003, "DISPATCH", vm_dispatch_subclients },
168 { 0x00000004, "PFIFO_WRITE", NULL },
169 { 0x00000005, "CCACHE", vm_ccache_subclients },
170 { 0x00000006, "PPPP", NULL },
171 { 0x00000007, "CLIPID", NULL },
172 { 0x00000008, "PFIFO_READ", NULL },
173 { 0x00000009, "VFETCH", NULL },
174 { 0x0000000a, "TEXTURE", NULL },
175 { 0x0000000b, "PROP", vm_prop_subclients },
176 { 0x0000000c, "PVP", NULL },
177 { 0x0000000d, "PBSP", NULL },
178 { 0x0000000e, "PCRYPT", NULL },
179 { 0x0000000f, "PCOUNTER", NULL },
180 { 0x00000011, "PDAEMON", NULL },
181 {}
182};
183
184static struct nouveau_enum vm_engine[] = {
185 { 0x00000000, "PGRAPH", NULL },
186 { 0x00000001, "PVP", NULL },
187 { 0x00000004, "PEEPHOLE", NULL },
188 { 0x00000005, "PFIFO", vm_pfifo_subclients },
189 { 0x00000006, "BAR", vm_bar_subclients },
190 { 0x00000008, "PPPP", NULL },
191 { 0x00000009, "PBSP", NULL },
192 { 0x0000000a, "PCRYPT", NULL },
193 { 0x0000000b, "PCOUNTER", NULL },
194 { 0x0000000c, "SEMAPHORE_BG", NULL },
195 { 0x0000000d, "PCOPY", NULL },
196 { 0x0000000e, "PDAEMON", NULL },
197 {}
198};
199
200static struct nouveau_enum vm_fault[] = {
201 { 0x00000000, "PT_NOT_PRESENT", NULL },
202 { 0x00000001, "PT_TOO_SHORT", NULL },
203 { 0x00000002, "PAGE_NOT_PRESENT", NULL },
204 { 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
205 { 0x00000004, "PAGE_READ_ONLY", NULL },
206 { 0x00000006, "NULL_DMAOBJ", NULL },
207 { 0x00000007, "WRONG_MEMTYPE", NULL },
208 { 0x0000000b, "VRAM_LIMIT", NULL },
209 { 0x0000000f, "DMAOBJ_LIMIT", NULL },
210 {}
211};
212
213void
214nv50_fb_vm_trap(struct drm_device *dev, int display)
215{
216 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
217 struct drm_nouveau_private *dev_priv = dev->dev_private;
218 const struct nouveau_enum *en, *cl;
219 unsigned long flags;
220 u32 trap[6], idx, chinst;
221 u8 st0, st1, st2, st3;
222 int i, ch;
223
224 idx = nv_rd32(dev, 0x100c90);
225 if (!(idx & 0x80000000))
226 return;
227 idx &= 0x00ffffff;
228
229 for (i = 0; i < 6; i++) {
230 nv_wr32(dev, 0x100c90, idx | i << 24);
231 trap[i] = nv_rd32(dev, 0x100c94);
232 }
233 nv_wr32(dev, 0x100c90, idx | 0x80000000);
234
235 if (!display)
236 return;
237
238 /* lookup channel id */
239 chinst = (trap[2] << 16) | trap[1];
240 spin_lock_irqsave(&dev_priv->channels.lock, flags);
241 for (ch = 0; ch < pfifo->channels; ch++) {
242 struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
243
244 if (!chan || !chan->ramin)
245 continue;
246
247 if (chinst == chan->ramin->vinst >> 12)
248 break;
249 }
250 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
251
252 /* decode status bits into something more useful */
253 if (dev_priv->chipset < 0xa3 ||
254 dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
255 st0 = (trap[0] & 0x0000000f) >> 0;
256 st1 = (trap[0] & 0x000000f0) >> 4;
257 st2 = (trap[0] & 0x00000f00) >> 8;
258 st3 = (trap[0] & 0x0000f000) >> 12;
259 } else {
260 st0 = (trap[0] & 0x000000ff) >> 0;
261 st1 = (trap[0] & 0x0000ff00) >> 8;
262 st2 = (trap[0] & 0x00ff0000) >> 16;
263 st3 = (trap[0] & 0xff000000) >> 24;
264 }
265
266 NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ",
267 (trap[5] & 0x00000100) ? "read" : "write",
268 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst);
269
270 en = nouveau_enum_find(vm_engine, st0);
271 if (en)
272 printk("%s/", en->name);
273 else
274 printk("%02x/", st0);
275
276 cl = nouveau_enum_find(vm_client, st2);
277 if (cl)
278 printk("%s/", cl->name);
279 else
280 printk("%02x/", st2);
281
282 if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
283 else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
284 else cl = NULL;
285 if (cl)
286 printk("%s", cl->name);
287 else
288 printk("%02x", st3);
289
290 printk(" reason: ");
291 en = nouveau_enum_find(vm_fault, st1);
292 if (en)
293 printk("%s\n", en->name);
294 else
295 printk("0x%08x\n", st1);
296}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index e3c8b05dcae4..52068a0910dc 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -22,20 +22,16 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include "nouveau_drm.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h" 26#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fbcon.h" 27#include "nouveau_fbcon.h"
30#include "nouveau_mm.h"
31 28
32int 29int
33nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 30nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
34{ 31{
35 struct nouveau_fbdev *nfbdev = info->par; 32 struct nouveau_fbdev *nfbdev = info->par;
36 struct drm_device *dev = nfbdev->dev; 33 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
37 struct drm_nouveau_private *dev_priv = dev->dev_private; 34 struct nouveau_channel *chan = drm->channel;
38 struct nouveau_channel *chan = dev_priv->channel;
39 int ret; 35 int ret;
40 36
41 ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11); 37 ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
@@ -69,9 +65,8 @@ int
69nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 65nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
70{ 66{
71 struct nouveau_fbdev *nfbdev = info->par; 67 struct nouveau_fbdev *nfbdev = info->par;
72 struct drm_device *dev = nfbdev->dev; 68 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
73 struct drm_nouveau_private *dev_priv = dev->dev_private; 69 struct nouveau_channel *chan = drm->channel;
74 struct nouveau_channel *chan = dev_priv->channel;
75 int ret; 70 int ret;
76 71
77 ret = RING_SPACE(chan, 12); 72 ret = RING_SPACE(chan, 12);
@@ -98,9 +93,8 @@ int
98nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 93nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
99{ 94{
100 struct nouveau_fbdev *nfbdev = info->par; 95 struct nouveau_fbdev *nfbdev = info->par;
101 struct drm_device *dev = nfbdev->dev; 96 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
102 struct drm_nouveau_private *dev_priv = dev->dev_private; 97 struct nouveau_channel *chan = drm->channel;
103 struct nouveau_channel *chan = dev_priv->channel;
104 uint32_t width, dwords, *data = (uint32_t *)image->data; 98 uint32_t width, dwords, *data = (uint32_t *)image->data;
105 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); 99 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
106 uint32_t *palette = info->pseudo_palette; 100 uint32_t *palette = info->pseudo_palette;
@@ -156,10 +150,11 @@ int
156nv50_fbcon_accel_init(struct fb_info *info) 150nv50_fbcon_accel_init(struct fb_info *info)
157{ 151{
158 struct nouveau_fbdev *nfbdev = info->par; 152 struct nouveau_fbdev *nfbdev = info->par;
159 struct drm_device *dev = nfbdev->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_channel *chan = dev_priv->channel;
162 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; 153 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
154 struct drm_device *dev = nfbdev->dev;
155 struct nouveau_drm *drm = nouveau_drm(dev);
156 struct nouveau_channel *chan = drm->channel;
157 struct nouveau_object *object;
163 int ret, format; 158 int ret, format;
164 159
165 switch (info->var.bits_per_pixel) { 160 switch (info->var.bits_per_pixel) {
@@ -189,7 +184,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
189 return -EINVAL; 184 return -EINVAL;
190 } 185 }
191 186
192 ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d); 187 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
188 0x502d, NULL, 0, &object);
193 if (ret) 189 if (ret)
194 return ret; 190 return ret;
195 191
@@ -202,9 +198,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
202 BEGIN_NV04(chan, NvSub2D, 0x0000, 1); 198 BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
203 OUT_RING(chan, Nv2D); 199 OUT_RING(chan, Nv2D);
204 BEGIN_NV04(chan, NvSub2D, 0x0184, 3); 200 BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
205 OUT_RING(chan, chan->vram_handle); 201 OUT_RING(chan, NvDmaFB);
206 OUT_RING(chan, chan->vram_handle); 202 OUT_RING(chan, NvDmaFB);
207 OUT_RING(chan, chan->vram_handle); 203 OUT_RING(chan, NvDmaFB);
208 BEGIN_NV04(chan, NvSub2D, 0x0290, 1); 204 BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
209 OUT_RING(chan, 0); 205 OUT_RING(chan, 0);
210 BEGIN_NV04(chan, NvSub2D, 0x0888, 1); 206 BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
new file mode 100644
index 000000000000..e0763ea88ee2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -0,0 +1,127 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <core/object.h>
26#include <core/class.h>
27
28#include "nouveau_drm.h"
29#include "nouveau_dma.h"
30#include "nouveau_fence.h"
31
32#include "nv50_display.h"
33
34struct nv50_fence_chan {
35 struct nouveau_fence_chan base;
36};
37
38struct nv50_fence_priv {
39 struct nouveau_fence_priv base;
40 struct nouveau_bo *bo;
41 spinlock_t lock;
42 u32 sequence;
43};
44
45static int
46nv50_fence_context_new(struct nouveau_channel *chan)
47{
48 struct drm_device *dev = chan->drm->dev;
49 struct nv50_fence_priv *priv = chan->drm->fence;
50 struct nv50_fence_chan *fctx;
51 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
52 struct nouveau_object *object;
53 int ret, i;
54
55 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
56 if (!fctx)
57 return -ENOMEM;
58
59 nouveau_fence_context_new(&fctx->base);
60
61 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
62 NvSema, 0x0002,
63 &(struct nv_dma_class) {
64 .flags = NV_DMA_TARGET_VRAM |
65 NV_DMA_ACCESS_RDWR,
66 .start = mem->start * PAGE_SIZE,
67 .limit = mem->size - 1,
68 }, sizeof(struct nv_dma_class),
69 &object);
70
71 /* dma objects for display sync channel semaphore blocks */
72 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
73 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
74
75 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
76 NvEvoSema0 + i, 0x003d,
77 &(struct nv_dma_class) {
78 .flags = NV_DMA_TARGET_VRAM |
79 NV_DMA_ACCESS_RDWR,
80 .start = bo->bo.offset,
81 .limit = bo->bo.offset + 0xfff,
82 }, sizeof(struct nv_dma_class),
83 &object);
84 }
85
86 if (ret)
87 nv10_fence_context_del(chan);
88 return ret;
89}
90
91int
92nv50_fence_create(struct nouveau_drm *drm)
93{
94 struct nv50_fence_priv *priv;
95 int ret = 0;
96
97 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
98 if (!priv)
99 return -ENOMEM;
100
101 priv->base.dtor = nv10_fence_destroy;
102 priv->base.context_new = nv50_fence_context_new;
103 priv->base.context_del = nv10_fence_context_del;
104 priv->base.emit = nv10_fence_emit;
105 priv->base.read = nv10_fence_read;
106 priv->base.sync = nv17_fence_sync;
107 spin_lock_init(&priv->lock);
108
109 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
110 0, 0x0000, NULL, &priv->bo);
111 if (!ret) {
112 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
113 if (!ret)
114 ret = nouveau_bo_map(priv->bo);
115 if (ret)
116 nouveau_bo_ref(NULL, &priv->bo);
117 }
118
119 if (ret == 0) {
120 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
121 priv->base.sync = nv17_fence_sync;
122 }
123
124 if (ret)
125 nv10_fence_destroy(drm);
126 return ret;
127}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
deleted file mode 100644
index 55383b85db0b..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ /dev/null
@@ -1,294 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_ramht.h"
32#include "nouveau_vm.h"
33
/* Engine-wide PFIFO state: double-buffered playlist objects so a new
 * playlist can be built while the hardware reads the previous one.
 */
struct nv50_fifo_priv {
	struct nouveau_fifo_priv base;
	struct nouveau_gpuobj *playlist[2];	/* two playlists, swapped on update */
	int cur_playlist;			/* index of the playlist last submitted */
};

/* Per-channel PFIFO context; no extra state beyond the common base. */
struct nv50_fifo_chan {
	struct nouveau_fifo_chan base;
};
43
/* Rebuild the runlist of active channels and hand it to PFIFO.
 * A channel is considered active when bit 31 of its 0x002600 slot is set
 * (the same bit nv50_fifo_context_new sets).  Writes go into the *other*
 * playlist buffer so the one hardware may still be reading stays intact.
 */
void
nv50_fifo_playlist_update(struct drm_device *dev)
{
	struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *cur;
	int i, p;

	/* flip to the inactive buffer before filling it */
	cur = priv->playlist[priv->cur_playlist];
	priv->cur_playlist = !priv->cur_playlist;

	for (i = 0, p = 0; i < priv->base.channels; i++) {
		if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
			nv_wo32(cur, p++ * 4, i);
	}

	/* make the instmem writes visible to the GPU before kicking it */
	dev_priv->engine.instmem.flush(dev);

	/* playlist address (in 4KiB units), entry count, then submit */
	nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
	nv_wr32(dev, 0x0032ec, p);
	nv_wr32(dev, 0x002500, 0x00000101);
}
66
/* Create the PFIFO context for a channel: allocate the software context,
 * map the channel's USER area, fill in the RAMFC in instance memory, then
 * register the channel with the hardware and rebuild the playlist.
 * On failure everything is unwound through context_del.
 */
static int
nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
{
	struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
	struct nv50_fifo_chan *fctx;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
	u64 instance = chan->ramin->vinst >> 12;
	unsigned long flags;
	int ret = 0, i;

	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;
	atomic_inc(&chan->vm->engref[engine]);

	/* map this channel's slice of the mmio USER area (BAR0) */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV50_USER(chan->id), PAGE_SIZE);
	if (!chan->user) {
		ret = -ENOMEM;
		goto error;
	}

	/* clear the first 0x100 bytes of RAMIN, then build the RAMFC.
	 * Offsets/values are hardware-defined; the 0x50/0x54 pair encodes
	 * the indirect-buffer address and its log2 size in the high bits.
	 */
	for (i = 0; i < 0x100; i += 4)
		nv_wo32(chan->ramin, i, 0x00000000);
	nv_wo32(chan->ramin, 0x3c, 0x403f6078);
	nv_wo32(chan->ramin, 0x40, 0x00000000);
	nv_wo32(chan->ramin, 0x44, 0x01003fff);
	nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
	nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
	nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
				   drm_order(chan->dma.ib_max + 1) << 16);
	nv_wo32(chan->ramin, 0x60, 0x7fffffff);
	nv_wo32(chan->ramin, 0x78, 0x00000000);
	nv_wo32(chan->ramin, 0x7c, 0x30000001);
	nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->gpuobj->cinst >> 4));

	dev_priv->engine.instmem.flush(dev);

	/* register channel (bit 31 = active) and refresh the playlist,
	 * under the context-switch lock so PFIFO state stays consistent
	 */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
	nv50_fifo_playlist_update(dev);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

error:
	/* label is also reached on success with ret == 0 */
	if (ret)
		priv->base.base.context_del(chan, engine);
	return ret;
}
119
120static bool
121nv50_fifo_kickoff(struct nouveau_channel *chan)
122{
123 struct drm_device *dev = chan->dev;
124 bool done = true;
125 u32 me;
126
127 /* HW bug workaround:
128 *
129 * PFIFO will hang forever if the connected engines don't report
130 * that they've processed the context switch request.
131 *
132 * In order for the kickoff to work, we need to ensure all the
133 * connected engines are in a state where they can answer.
134 *
135 * Newer chipsets don't seem to suffer from this issue, and well,
136 * there's also a "ignore these engines" bitmask reg we can use
137 * if we hit the issue there..
138 */
139
140 /* PME: make sure engine is enabled */
141 me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
142
143 /* do the kickoff... */
144 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
145 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
146 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
147 done = false;
148 }
149
150 /* restore any engine states we changed, and exit */
151 nv_wr32(dev, 0x00b860, me);
152 return done;
153}
154
/* Tear down a channel's PFIFO context: deactivate it, force any engine
 * contexts off the hardware, then release software resources.
 * Ordering matters: the channel must leave the playlist before kickoff,
 * and all hardware access happens under the context-switch lock.
 */
static void
nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
{
	struct nv50_fifo_chan *fctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	/* remove channel from playlist, will context switch if active */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
	nv50_fifo_playlist_update(dev);

	/* tell any engines on this channel to unload their contexts */
	nv50_fifo_kickoff(chan);

	/* fully clear the channel's slot */
	nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* clean up */
	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}

	atomic_dec(&chan->vm->engref[engine]);
	chan->engctx[engine] = NULL;
	kfree(fctx);
}
184
/* Bring PFIFO up (init/resume): reset the unit, re-arm interrupts,
 * restore every channel's 0x002600 slot from software state, rebuild
 * the playlist and enable operation.
 */
static int
nv50_fifo_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 instance;
	int i;

	/* pulse the PFIFO master-enable bit to reset the unit */
	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(dev, 0x00250c, 0x6f3cfc34);	/* magic; presumably DMA setup — unverified */
	nv_wr32(dev, 0x002044, 0x01003fff);

	/* ack all pending interrupts, unmask everything */
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xffffffff);

	/* repopulate per-channel instance pointers (bit 31 = active) */
	for (i = 0; i < 128; i++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
		if (chan && chan->engctx[engine])
			instance = 0x80000000 | chan->ramin->vinst >> 12;
		else
			instance = 0x00000000;
		nv_wr32(dev, 0x002600 + (i * 4), instance);
	}

	nv50_fifo_playlist_update(dev);

	/* enable fetch/pusher/PFIFO */
	nv_wr32(dev, 0x003200, 1);
	nv_wr32(dev, 0x003250, 1);
	nv_wr32(dev, 0x002500, 1);
	return 0;
}
216
217static int
218nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
219{
220 struct drm_nouveau_private *dev_priv = dev->dev_private;
221 struct nv50_fifo_priv *priv = nv_engine(dev, engine);
222 int i;
223
224 /* set playlist length to zero, fifo will unload context */
225 nv_wr32(dev, 0x0032ec, 0);
226
227 /* tell all connected engines to unload their contexts */
228 for (i = 0; i < priv->base.channels; i++) {
229 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
230 if (chan && !nv50_fifo_kickoff(chan))
231 return -EBUSY;
232 }
233
234 nv_wr32(dev, 0x002140, 0);
235 return 0;
236}
237
/* Flush the VM TLB for the PFIFO engine (engine id 5 in the nv50 VM). */
void
nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 5);
}
243
/* Free the PFIFO engine object: detach the IRQ handler, drop both
 * playlist buffers, unhook from the engine table and release memory.
 * Safe to call from the create error path (refs on NULL are no-ops).
 */
void
nv50_fifo_destroy(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = nv_engine(dev, engine);

	nouveau_irq_unregister(dev, 8);

	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);

	dev_priv->eng[engine] = NULL;
	kfree(priv);
}
258
259int
260nv50_fifo_create(struct drm_device *dev)
261{
262 struct drm_nouveau_private *dev_priv = dev->dev_private;
263 struct nv50_fifo_priv *priv;
264 int ret;
265
266 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
267 if (!priv)
268 return -ENOMEM;
269
270 priv->base.base.destroy = nv50_fifo_destroy;
271 priv->base.base.init = nv50_fifo_init;
272 priv->base.base.fini = nv50_fifo_fini;
273 priv->base.base.context_new = nv50_fifo_context_new;
274 priv->base.base.context_del = nv50_fifo_context_del;
275 priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
276 priv->base.channels = 127;
277 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
278
279 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
280 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
281 if (ret)
282 goto error;
283
284 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
285 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
286 if (ret)
287 goto error;
288
289 nouveau_irq_register(dev, 8, nv04_fifo_isr);
290error:
291 if (ret)
292 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
293 return ret;
294}
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
deleted file mode 100644
index c399d510b27a..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ /dev/null
@@ -1,155 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/dmi.h>
26#include "drmP.h"
27#include "nouveau_drv.h"
28#include "nouveau_hw.h"
29#include "nouveau_gpio.h"
30
31#include "nv50_display.h"
32
33static int
34nv50_gpio_location(int line, u32 *reg, u32 *shift)
35{
36 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
37
38 if (line >= 32)
39 return -EINVAL;
40
41 *reg = nv50_gpio_reg[line >> 3];
42 *shift = (line & 7) << 2;
43 return 0;
44}
45
46int
47nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
48{
49 u32 reg, shift;
50
51 if (nv50_gpio_location(line, &reg, &shift))
52 return -EINVAL;
53
54 nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
55 return 0;
56}
57
58int
59nv50_gpio_sense(struct drm_device *dev, int line)
60{
61 u32 reg, shift;
62
63 if (nv50_gpio_location(line, &reg, &shift))
64 return -EINVAL;
65
66 return !!(nv_rd32(dev, reg) & (4 << shift));
67}
68
69void
70nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
71{
72 u32 reg = line < 16 ? 0xe050 : 0xe070;
73 u32 mask = 0x00010001 << (line & 0xf);
74
75 nv_wr32(dev, reg + 4, mask);
76 nv_mask(dev, reg + 0, mask, on ? mask : 0);
77}
78
79int
80nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
81{
82 u32 data = ((dir ^ 1) << 13) | (out << 12);
83 nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
84 nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
85 return 0;
86}
87
88int
89nvd0_gpio_sense(struct drm_device *dev, int line)
90{
91 return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
92}
93
/* GPIO interrupt handler.  Pending+enabled bits are gathered from the
 * first bank (0xe054/0xe050) and, on chipsets >= 0x90, a second bank
 * (0xe074/0xe070).  The hi/lo swizzle below repacks the two 16-bit
 * halves of each bank so all lines land in one 32-bit word before being
 * forwarded to the common GPIO ISR, then the sources are acked.
 */
static void
nv50_gpio_isr(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 intr0, intr1 = 0;
	u32 hi, lo;

	/* pending & enabled only */
	intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
	if (dev_priv->chipset >= 0x90)
		intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);

	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
	nouveau_gpio_isr(dev, 0, hi | lo);

	/* ack what we handled */
	nv_wr32(dev, 0xe054, intr0);
	if (dev_priv->chipset >= 0x90)
		nv_wr32(dev, 0xe074, intr1);
}
113
/* DMI match table of machines whose GPIOs must be reset to the VBIOS
 * defaults at init (see nv50_gpio_init).  The match strings are consumed
 * by dmi_check_system() and must stay byte-exact.
 */
static struct dmi_system_id gpio_reset_ids[] = {
	{
		.ident = "Apple Macbook 10,1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
		}
	},
	{ }	/* terminator */
};
124
/* Initialise the GPIO unit: optionally reset lines to VBIOS defaults on
 * quirky machines, mask and ack all GPIO interrupts, then install the
 * interrupt handler on IRQ source 21.
 */
int
nv50_gpio_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* initialise gpios and routing to vbios defaults */
	if (dmi_check_system(gpio_reset_ids))
		nouveau_gpio_reset(dev);

	/* disable, and ack any pending gpio interrupts */
	nv_wr32(dev, 0xe050, 0x00000000);
	nv_wr32(dev, 0xe054, 0xffffffff);
	if (dev_priv->chipset >= 0x90) {
		/* second interrupt bank on >= 0x90 */
		nv_wr32(dev, 0xe070, 0x00000000);
		nv_wr32(dev, 0xe074, 0xffffffff);
	}

	nouveau_irq_register(dev, 21, nv50_gpio_isr);
	return 0;
}
145
/* Shut down the GPIO unit: mask both interrupt banks before removing
 * the handler so no interrupt can fire into an unregistered slot.
 */
void
nv50_gpio_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nv_wr32(dev, 0xe050, 0x00000000);
	if (dev_priv->chipset >= 0x90)
		nv_wr32(dev, 0xe070, 0x00000000);
	nouveau_irq_unregister(dev, 21);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
deleted file mode 100644
index 437608d1dfe7..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ /dev/null
@@ -1,868 +0,0 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_ramht.h"
32#include "nouveau_dma.h"
33#include "nouveau_vm.h"
34#include "nv50_evo.h"
35
/* PGRAPH engine state: the common base plus the context-switch microcode
 * ("ctxprog") uploaded at init time, and the size of a channel's grctx.
 */
struct nv50_graph_engine {
	struct nouveau_exec_engine base;
	u32 ctxprog[512];	/* context program, uploaded via 0x400328 */
	u32 ctxprog_size;	/* number of valid words in ctxprog */
	u32 grctx_size;		/* bytes needed for a per-channel context */
};
42
/* Bring PGRAPH up: master reset, trap/interrupt arming per enabled TP,
 * ctxprog upload, chipset-dependent zcull setup and region clearing.
 * Register offsets/values are hardware magic carried over from reverse
 * engineering; ordering is significant.
 */
static int
nv50_graph_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
	u32 units = nv_rd32(dev, 0x001540);	/* bitmask of present units/TPs */
	int i;

	NV_DEBUG(dev, "\n");

	/* master reset */
	nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
	nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
	nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */

	/* reset/enable traps and interrupts */
	nv_wr32(dev, 0x400804, 0xc0000000);
	nv_wr32(dev, 0x406800, 0xc0000000);
	nv_wr32(dev, 0x400c04, 0xc0000000);
	nv_wr32(dev, 0x401800, 0xc0000000);
	nv_wr32(dev, 0x405018, 0xc0000000);
	nv_wr32(dev, 0x402000, 0xc0000000);
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		/* per-TP trap registers; stride differs pre/post nva0 */
		if (dev_priv->chipset < 0xa0) {
			nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
			nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
			nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
		} else {
			nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
			nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
			nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
		}
	}

	/* ack everything pending, unmask all interrupt sources */
	nv_wr32(dev, 0x400108, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, 0x400100, 0xffffffff);
	nv_wr32(dev, 0x40013c, 0xffffffff);
	nv_wr32(dev, 0x400500, 0x00010001);

	/* upload context program, initialise ctxctl defaults */
	nv_wr32(dev, 0x400324, 0x00000000);
	for (i = 0; i < pgraph->ctxprog_size; i++)
		nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
	nv_wr32(dev, 0x400824, 0x00000000);
	nv_wr32(dev, 0x400828, 0x00000000);
	nv_wr32(dev, 0x40082c, 0x00000000);
	nv_wr32(dev, 0x400830, 0x00000000);
	nv_wr32(dev, 0x400724, 0x00000000);
	nv_wr32(dev, 0x40032c, 0x00000000);
	nv_wr32(dev, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */

	/* some unknown zcull magic */
	switch (dev_priv->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nv_wr32(dev, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		nv_wr32(dev, 0x402cc0, 0x00000000);
		if (dev_priv->chipset == 0xa0 ||
		    dev_priv->chipset == 0xaa ||
		    dev_priv->chipset == 0xac) {
			nv_wr32(dev, 0x402ca8, 0x00000802);
		} else {
			nv_wr32(dev, 0x402cc0, 0x00000000);
			nv_wr32(dev, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
		nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
	}

	return 0;
}
130
/* Quiesce PGRAPH for suspend/teardown by masking its interrupts. */
static int
nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_wr32(dev, 0x40013c, 0x00000000);
	return 0;
}
137
/* Create a channel's PGRAPH context: allocate the grctx object, point the
 * channel's RAMIN header at it, pre-fill it with default state and record
 * it as the channel's engine context.
 */
static int
nv50_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *grctx = NULL;
	struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
	int hdr, ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &grctx);
	if (ret)
		return ret;

	/* context header lives at 0x200 on nv50, 0x20 on later chips */
	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	nv_wo32(ramin, hdr + 0x00, 0x00190002);
	nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
	nv_wo32(ramin, hdr + 0x08, grctx->vinst);
	nv_wo32(ramin, hdr + 0x0c, 0);
	nv_wo32(ramin, hdr + 0x10, 0);
	nv_wo32(ramin, hdr + 0x14, 0x00010000);

	/* fill the grctx with defaults; first word points back at RAMIN */
	nv50_grctx_fill(dev, grctx);
	nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);

	dev_priv->engine.instmem.flush(dev);

	atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
	chan->engctx[NVOBJ_ENGINE_GR] = grctx;
	return 0;
}
173
/* Destroy a channel's PGRAPH context: wipe the 24-byte context header in
 * RAMIN, flush, then drop the grctx reference (gpuobj_ref(NULL, ...)
 * releases it and NULLs the pointer).
 */
static void
nv50_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *grctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;

	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(chan->ramin, i, 0);
	dev_priv->engine.instmem.flush(dev);

	atomic_dec(&chan->vm->engref[engine]);
	nouveau_gpuobj_ref(NULL, &grctx);
	chan->engctx[engine] = NULL;
}
190
/* Create a software graphics object of the given class for a channel and
 * insert it into the channel's RAMHT under @handle.  The local gpuobj
 * reference is dropped after insertion; RAMHT keeps its own.
 */
static int
nv50_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 1;	/* 1 == graphics engine */
	obj->class = class;

	/* 16-byte object: class in word 0, rest zero */
	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
216
/* Flush the VM TLB for PGRAPH (engine id 0 in the nv50 VM). */
static void
nv50_graph_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 0);
}
222
/* nv84+ TLB flush: PGRAPH must be idled before the flush is safe.  The
 * 0x400380/384/388 registers are polled until no unit reports state 1
 * (busy, presumably — unverified), with a 2-second timeout, all while
 * fetching is paused via 0x400500 under the context-switch lock.
 */
static void
nv84_graph_tlb_flush(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	bool idle, timeout = false;
	unsigned long flags;
	u64 start;
	u32 tmp;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x400500, 0x00000001, 0x00000000);	/* pause fetch */

	start = ptimer->read(dev);
	do {
		idle = true;

		/* each register packs per-unit 3-bit status fields */
		for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));

	if (timeout) {
		NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
			      "0x%08x 0x%08x 0x%08x 0x%08x\n",
			 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
	}

	/* flush proceeds even on timeout — best effort */
	nv50_vm_flush_engine(dev, 0);

	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);	/* resume fetch */
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
268
/* Decode tables used by the interrupt/trap handlers below to turn raw
 * hardware status bits into readable names.  Values are hardware-defined.
 */

/* MP execution trap status codes (see nv50_pgraph_mp_trap) */
static struct nouveau_enum nv50_mp_exec_error_names[] = {
	{ 3, "STACK_UNDERFLOW", NULL },
	{ 4, "QUADON_ACTIVE", NULL },
	{ 8, "TIMEOUT", NULL },
	{ 0x10, "INVALID_OPCODE", NULL },
	{ 0x40, "BREAKPOINT", NULL },
	{}
};

/* M2MF (memory-to-memory copy) trap status bits */
static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

/* VFETCH (vertex fetch) trap status bits */
static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* STRMOUT (stream-out / transform feedback) trap status bits */
static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* CCACHE trap status bits */
static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* There must be a *lot* of these. Will take some time to gather them up. */
/* non-static: shared with other interrupt-reporting code */
struct nouveau_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_OPERATION", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};

/* top-level PGRAPH interrupt status bits (0x400100) */
static struct nouveau_bitfield nv50_graph_intr[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};
360
/* Handle an MP (shader multiprocessor) execution trap for one TP.  Walks
 * the up-to-4 MPs present in the TP (bits 24-27 of 0x1540), decodes and
 * optionally prints the trap status, then acks it.  If no MP claimed the
 * error and printing is enabled, says so.
 */
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;	/* count of MPs that reported an error */
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		/* per-MP register base; stride differs pre/post nva0 */
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(dev, addr + 0x20);	/* read presumably latches trap info — unverified */
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_enum_print(nv50_mp_exec_error_names, status);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		/* write back to ack the trap */
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
400
/* Handle a per-TP trap of the given @type.  Iterates every enabled TP
 * (low 16 bits of 0x1540), reads its ustatus register (whose address
 * depends on chipset generation: @ustatus_old pre-nva0, @ustatus_new
 * after), decodes/prints according to the trap type, then acks with
 * 0xc0000000.  @name is used for diagnostic output only.
 */
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;	/* count of TPs that reported an error */
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x04030000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x04030000;
			}
			break;
		case 8: /* TPDMA error */
			{
			/* fault address/info registers following ustatus */
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		/* ack the trap for this TP */
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
500
501static int
502nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
503{
504 u32 status = nv_rd32(dev, 0x400108);
505 u32 ustatus;
506
507 if (!status && display) {
508 NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
509 return 1;
510 }
511
512 /* DISPATCH: Relays commands to other units and handles NOTIFY,
513 * COND, QUERY. If you get a trap from it, the command is still stuck
514 * in DISPATCH and you need to do something about it. */
515 if (status & 0x001) {
516 ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
517 if (!ustatus && display) {
518 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
519 }
520
521 nv_wr32(dev, 0x400500, 0x00000000);
522
523 /* Known to be triggered by screwed up NOTIFY and COND... */
524 if (ustatus & 0x00000001) {
525 u32 addr = nv_rd32(dev, 0x400808);
526 u32 subc = (addr & 0x00070000) >> 16;
527 u32 mthd = (addr & 0x00001ffc);
528 u32 datal = nv_rd32(dev, 0x40080c);
529 u32 datah = nv_rd32(dev, 0x400810);
530 u32 class = nv_rd32(dev, 0x400814);
531 u32 r848 = nv_rd32(dev, 0x400848);
532
533 NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
534 if (display && (addr & 0x80000000)) {
535 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
536 "subc %d class 0x%04x mthd 0x%04x "
537 "data 0x%08x%08x "
538 "400808 0x%08x 400848 0x%08x\n",
539 chid, inst, subc, class, mthd, datah,
540 datal, addr, r848);
541 } else
542 if (display) {
543 NV_INFO(dev, "PGRAPH - no stuck command?\n");
544 }
545
546 nv_wr32(dev, 0x400808, 0);
547 nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
548 nv_wr32(dev, 0x400848, 0);
549 ustatus &= ~0x00000001;
550 }
551
552 if (ustatus & 0x00000002) {
553 u32 addr = nv_rd32(dev, 0x40084c);
554 u32 subc = (addr & 0x00070000) >> 16;
555 u32 mthd = (addr & 0x00001ffc);
556 u32 data = nv_rd32(dev, 0x40085c);
557 u32 class = nv_rd32(dev, 0x400814);
558
559 NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
560 if (display && (addr & 0x80000000)) {
561 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
562 "subc %d class 0x%04x mthd 0x%04x "
563 "data 0x%08x 40084c 0x%08x\n",
564 chid, inst, subc, class, mthd,
565 data, addr);
566 } else
567 if (display) {
568 NV_INFO(dev, "PGRAPH - no stuck command?\n");
569 }
570
571 nv_wr32(dev, 0x40084c, 0);
572 ustatus &= ~0x00000002;
573 }
574
575 if (ustatus && display) {
576 NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
577 "0x%08x)\n", ustatus);
578 }
579
580 nv_wr32(dev, 0x400804, 0xc0000000);
581 nv_wr32(dev, 0x400108, 0x001);
582 status &= ~0x001;
583 if (!status)
584 return 0;
585 }
586
587 /* M2MF: Memory to memory copy engine. */
588 if (status & 0x002) {
589 u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
590 if (display) {
591 NV_INFO(dev, "PGRAPH - TRAP_M2MF");
592 nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
593 printk("\n");
594 NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
595 nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
596 nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
597
598 }
599
600 /* No sane way found yet -- just reset the bugger. */
601 nv_wr32(dev, 0x400040, 2);
602 nv_wr32(dev, 0x400040, 0);
603 nv_wr32(dev, 0x406800, 0xc0000000);
604 nv_wr32(dev, 0x400108, 0x002);
605 status &= ~0x002;
606 }
607
608 /* VFETCH: Fetches data from vertex buffers. */
609 if (status & 0x004) {
610 u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
611 if (display) {
612 NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
613 nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
614 printk("\n");
615 NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
616 nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
617 nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
618 }
619
620 nv_wr32(dev, 0x400c04, 0xc0000000);
621 nv_wr32(dev, 0x400108, 0x004);
622 status &= ~0x004;
623 }
624
625 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
626 if (status & 0x008) {
627 ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
628 if (display) {
629 NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
630 nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
631 printk("\n");
632 NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
633 nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
634 nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
635
636 }
637
638 /* No sane way found yet -- just reset the bugger. */
639 nv_wr32(dev, 0x400040, 0x80);
640 nv_wr32(dev, 0x400040, 0);
641 nv_wr32(dev, 0x401800, 0xc0000000);
642 nv_wr32(dev, 0x400108, 0x008);
643 status &= ~0x008;
644 }
645
646 /* CCACHE: Handles code and c[] caches and fills them. */
647 if (status & 0x010) {
648 ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
649 if (display) {
650 NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
651 nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
652 printk("\n");
653 NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
654 " %08x %08x %08x\n",
655 nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
656 nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
657 nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
658 nv_rd32(dev, 0x40501c));
659
660 }
661
662 nv_wr32(dev, 0x405018, 0xc0000000);
663 nv_wr32(dev, 0x400108, 0x010);
664 status &= ~0x010;
665 }
666
667 /* Unknown, not seen yet... 0x402000 is the only trap status reg
668 * remaining, so try to handle it anyway. Perhaps related to that
669 * unknown DMA slot on tesla? */
670 if (status & 0x20) {
671 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
672 if (display)
673 NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
674 nv_wr32(dev, 0x402000, 0xc0000000);
675 /* no status modifiction on purpose */
676 }
677
678 /* TEXTURE: CUDA texturing units */
679 if (status & 0x040) {
680 nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
681 "PGRAPH - TRAP_TEXTURE");
682 nv_wr32(dev, 0x400108, 0x040);
683 status &= ~0x040;
684 }
685
686 /* MP: CUDA execution engines. */
687 if (status & 0x080) {
688 nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
689 "PGRAPH - TRAP_MP");
690 nv_wr32(dev, 0x400108, 0x080);
691 status &= ~0x080;
692 }
693
694 /* TPDMA: Handles TP-initiated uncached memory accesses:
695 * l[], g[], stack, 2d surfaces, render targets. */
696 if (status & 0x100) {
697 nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
698 "PGRAPH - TRAP_TPDMA");
699 nv_wr32(dev, 0x400108, 0x100);
700 status &= ~0x100;
701 }
702
703 if (status) {
704 if (display)
705 NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
706 nv_wr32(dev, 0x400108, status);
707 }
708
709 return 1;
710}
711
712int
713nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
714{
715 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
716 struct drm_nouveau_private *dev_priv = dev->dev_private;
717 struct nouveau_channel *chan;
718 unsigned long flags;
719 int i;
720
721 spin_lock_irqsave(&dev_priv->channels.lock, flags);
722 for (i = 0; i < pfifo->channels; i++) {
723 chan = dev_priv->channels.ptr[i];
724 if (!chan || !chan->ramin)
725 continue;
726
727 if (inst == chan->ramin->vinst)
728 break;
729 }
730 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
731 return i;
732}
733
/* Top-level PGRAPH interrupt handler (registered on irq line 12).
 * Drains 0x400100 until no status bits remain; for each pass it decodes
 * the trapped channel/method state, tries software-method dispatch, and
 * logs anything left unhandled (ratelimited). */
static void
nv50_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, 0x400100))) {
		/* instance address of the active channel's RAMIN */
		u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
		u32 chid = nv50_graph_isr_chid(dev, inst);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400814);
		u32 show = stat;

		/* METHOD interrupt: attempt software-method execution first;
		 * only keep reporting it if nothing claimed the method. */
		if (stat & 0x00000010) {
			if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
						       mthd, data))
				show &= ~0x00000010;
		}

		/* ratelimit all the logging below */
		show = (show && nouveau_ratelimit()) ? show : 0;

		if (show & 0x00100000) {
			u32 ecode = nv_rd32(dev, 0x400110);
			NV_INFO(dev, "PGRAPH - DATA_ERROR ");
			nouveau_enum_print(nv50_data_error_names, ecode);
			printk("\n");
		}

		/* TRAP interrupt: hand off to the per-unit trap decoder */
		if (stat & 0x00200000) {
			if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
				show &= ~0x00200000;
		}

		/* acknowledge the interrupt and kick PGRAPH again */
		nv_wr32(dev, 0x400100, stat);
		nv_wr32(dev, 0x400500, 0x00010001);

		if (show) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv50_graph_intr, show);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
				"class 0x%04x mthd 0x%04x data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
			nv50_fb_vm_trap(dev, 1);
		}
	}

	/* NOTE(review): 0x400824 bit 31 is cleared unconditionally on every
	 * ISR exit; the register's meaning is not visible from this file. */
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
786
787static void
788nv50_graph_destroy(struct drm_device *dev, int engine)
789{
790 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
791
792 NVOBJ_ENGINE_DEL(dev, GR);
793
794 nouveau_irq_unregister(dev, 12);
795 kfree(pgraph);
796}
797
798int
799nv50_graph_create(struct drm_device *dev)
800{
801 struct drm_nouveau_private *dev_priv = dev->dev_private;
802 struct nv50_graph_engine *pgraph;
803 int ret;
804
805 pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
806 if (!pgraph)
807 return -ENOMEM;
808
809 ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
810 &pgraph->ctxprog_size,
811 &pgraph->grctx_size);
812 if (ret) {
813 NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
814 kfree(pgraph);
815 return 0;
816 }
817
818 pgraph->base.destroy = nv50_graph_destroy;
819 pgraph->base.init = nv50_graph_init;
820 pgraph->base.fini = nv50_graph_fini;
821 pgraph->base.context_new = nv50_graph_context_new;
822 pgraph->base.context_del = nv50_graph_context_del;
823 pgraph->base.object_new = nv50_graph_object_new;
824 if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
825 pgraph->base.tlb_flush = nv50_graph_tlb_flush;
826 else
827 pgraph->base.tlb_flush = nv84_graph_tlb_flush;
828
829 nouveau_irq_register(dev, 12, nv50_graph_isr);
830
831 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
832 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
833 NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
834 NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
835
836 /* tesla */
837 if (dev_priv->chipset == 0x50)
838 NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
839 else
840 if (dev_priv->chipset < 0xa0)
841 NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
842 else {
843 switch (dev_priv->chipset) {
844 case 0xa0:
845 case 0xaa:
846 case 0xac:
847 NVOBJ_CLASS(dev, 0x8397, GR);
848 break;
849 case 0xa3:
850 case 0xa5:
851 case 0xa8:
852 NVOBJ_CLASS(dev, 0x8597, GR);
853 break;
854 case 0xaf:
855 NVOBJ_CLASS(dev, 0x8697, GR);
856 break;
857 }
858 }
859
860 /* compute */
861 NVOBJ_CLASS(dev, 0x50c0, GR);
862 if (dev_priv->chipset > 0xa0 &&
863 dev_priv->chipset != 0xaa &&
864 dev_priv->chipset != 0xac)
865 NVOBJ_CLASS(dev, 0x85c0, GR);
866
867 return 0;
868}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
deleted file mode 100644
index 0bba54f11800..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ /dev/null
@@ -1,428 +0,0 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30
31#include "nouveau_drv.h"
32#include "nouveau_vm.h"
33
34#define BAR1_VM_BASE 0x0020000000ULL
35#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
36#define BAR3_VM_BASE 0x0000000000ULL
37#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
38
39struct nv50_instmem_priv {
40 uint32_t save1700[5]; /* 0x1700->0x1710 */
41
42 struct nouveau_gpuobj *bar1_dmaobj;
43 struct nouveau_gpuobj *bar3_dmaobj;
44};
45
46static void
47nv50_channel_del(struct nouveau_channel **pchan)
48{
49 struct nouveau_channel *chan;
50
51 chan = *pchan;
52 *pchan = NULL;
53 if (!chan)
54 return;
55
56 nouveau_gpuobj_ref(NULL, &chan->ramfc);
57 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
58 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
59 if (drm_mm_initialized(&chan->ramin_heap))
60 drm_mm_takedown(&chan->ramin_heap);
61 nouveau_gpuobj_ref(NULL, &chan->ramin);
62 kfree(chan);
63}
64
65static int
66nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
67 struct nouveau_channel **pchan)
68{
69 struct drm_nouveau_private *dev_priv = dev->dev_private;
70 u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
71 u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
72 struct nouveau_channel *chan;
73 int ret, i;
74
75 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
76 if (!chan)
77 return -ENOMEM;
78 chan->dev = dev;
79
80 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
81 if (ret) {
82 nv50_channel_del(&chan);
83 return ret;
84 }
85
86 ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
87 if (ret) {
88 nv50_channel_del(&chan);
89 return ret;
90 }
91
92 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
93 chan->ramin->pinst + pgd,
94 chan->ramin->vinst + pgd,
95 0x4000, NVOBJ_FLAG_ZERO_ALLOC,
96 &chan->vm_pd);
97 if (ret) {
98 nv50_channel_del(&chan);
99 return ret;
100 }
101
102 for (i = 0; i < 0x4000; i += 8) {
103 nv_wo32(chan->vm_pd, i + 0, 0x00000000);
104 nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
105 }
106
107 ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
108 if (ret) {
109 nv50_channel_del(&chan);
110 return ret;
111 }
112
113 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
114 chan->ramin->pinst + fc,
115 chan->ramin->vinst + fc, 0x100,
116 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
117 if (ret) {
118 nv50_channel_del(&chan);
119 return ret;
120 }
121
122 *pchan = chan;
123 return 0;
124}
125
126int
127nv50_instmem_init(struct drm_device *dev)
128{
129 struct drm_nouveau_private *dev_priv = dev->dev_private;
130 struct nv50_instmem_priv *priv;
131 struct nouveau_channel *chan;
132 struct nouveau_vm *vm;
133 int ret, i;
134 u32 tmp;
135
136 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
137 if (!priv)
138 return -ENOMEM;
139 dev_priv->engine.instmem.priv = priv;
140
141 /* Save state, will restore at takedown. */
142 for (i = 0x1700; i <= 0x1710; i += 4)
143 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
144
145 /* Global PRAMIN heap */
146 ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
147 if (ret) {
148 NV_ERROR(dev, "Failed to init RAMIN heap\n");
149 goto error;
150 }
151
152 /* BAR3 */
153 ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
154 &dev_priv->bar3_vm);
155 if (ret)
156 goto error;
157
158 ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
159 0x1000, NVOBJ_FLAG_DONT_MAP |
160 NVOBJ_FLAG_ZERO_ALLOC,
161 &dev_priv->bar3_vm->pgt[0].obj[0]);
162 if (ret)
163 goto error;
164 dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
165
166 nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
167
168 ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
169 if (ret)
170 goto error;
171 dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
172
173 ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
174 NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
175 NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
176 &priv->bar3_dmaobj);
177 if (ret)
178 goto error;
179
180 nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
181 nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
182 nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
183
184 dev_priv->engine.instmem.flush(dev);
185 dev_priv->ramin_available = true;
186
187 tmp = nv_ro32(chan->ramin, 0);
188 nv_wo32(chan->ramin, 0, ~tmp);
189 if (nv_ro32(chan->ramin, 0) != ~tmp) {
190 NV_ERROR(dev, "PRAMIN readback failed\n");
191 ret = -EIO;
192 goto error;
193 }
194 nv_wo32(chan->ramin, 0, tmp);
195
196 /* BAR1 */
197 ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
198 if (ret)
199 goto error;
200
201 ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
202 if (ret)
203 goto error;
204 nouveau_vm_ref(NULL, &vm, NULL);
205
206 ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
207 NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
208 NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
209 &priv->bar1_dmaobj);
210 if (ret)
211 goto error;
212
213 nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
214 for (i = 0; i < 8; i++)
215 nv_wr32(dev, 0x1900 + (i*4), 0);
216
217 /* Create shared channel VM, space is reserved at the beginning
218 * to catch "NULL pointer" references
219 */
220 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
221 &dev_priv->chan_vm);
222 if (ret)
223 return ret;
224
225 return 0;
226
227error:
228 nv50_instmem_takedown(dev);
229 return ret;
230}
231
/* Tear down everything nv50_instmem_init() built: the shared channel VM,
 * the BAR DMA objects, the BAR1/BAR3 VMs and the fake channel.  Also
 * used as init's error path, so every step must tolerate state that was
 * never set up (the ref helpers all accept NULL targets). */
void
nv50_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
	int i;

	NV_DEBUG(dev, "\n");

	if (!priv)
		return;

	/* stop all PRAMIN accessors before unwinding the mappings */
	dev_priv->ramin_available = false;

	nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);

	/* restore the 0x1700-0x1710 window registers saved at init */
	for (i = 0x1700; i <= 0x1710; i += 4)
		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);

	nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
	nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);

	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
	/* slot 127 aliased slot 0's channel; clear the alias before del */
	dev_priv->channels.ptr[127] = 0;
	nv50_channel_del(&dev_priv->channels.ptr[0]);

	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

	if (drm_mm_initialized(&dev_priv->ramin_heap))
		drm_mm_takedown(&dev_priv->ramin_heap);

	dev_priv->engine.instmem.priv = NULL;
	kfree(priv);
}
268
269int
270nv50_instmem_suspend(struct drm_device *dev)
271{
272 struct drm_nouveau_private *dev_priv = dev->dev_private;
273
274 dev_priv->ramin_available = false;
275 return 0;
276}
277
/* Resume hook: re-point the BAR configuration at the fake channel's
 * RAMIN and the saved BAR1/BAR3 ctxdmas, then re-enable PRAMIN use. */
void
nv50_instmem_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
	int i;

	/* Poke the relevant regs, and pray it works :) */
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
					     NV50_PUNK_BAR_CFG_BASE_VALID);
	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
					    NV50_PUNK_BAR1_CTXDMA_VALID);
	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
					    NV50_PUNK_BAR3_CTXDMA_VALID);

	/* same 0x1900 clears as done at init time */
	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x1900 + (i*4), 0);

	dev_priv->ramin_available = true;
}
301
/* Per-gpuobj instmem bookkeeping for nv50. */
struct nv50_gpuobj_node {
	struct nouveau_mem *vram;	/* backing VRAM allocation */
	struct nouveau_vma chan_vma;	/* channel-VM mapping (NVOBJ_FLAG_VM objects only) */
	u32 align;			/* alignment requested at allocation time */
};
307
308int
309nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
310 u32 size, u32 align)
311{
312 struct drm_device *dev = gpuobj->dev;
313 struct drm_nouveau_private *dev_priv = dev->dev_private;
314 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
315 struct nv50_gpuobj_node *node = NULL;
316 int ret;
317
318 node = kzalloc(sizeof(*node), GFP_KERNEL);
319 if (!node)
320 return -ENOMEM;
321 node->align = align;
322
323 size = (size + 4095) & ~4095;
324 align = max(align, (u32)4096);
325
326 ret = vram->get(dev, size, align, 0, 0, &node->vram);
327 if (ret) {
328 kfree(node);
329 return ret;
330 }
331
332 gpuobj->vinst = node->vram->offset;
333
334 if (gpuobj->flags & NVOBJ_FLAG_VM) {
335 u32 flags = NV_MEM_ACCESS_RW;
336 if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
337 flags |= NV_MEM_ACCESS_SYS;
338
339 ret = nouveau_vm_get(chan->vm, size, 12, flags,
340 &node->chan_vma);
341 if (ret) {
342 vram->put(dev, &node->vram);
343 kfree(node);
344 return ret;
345 }
346
347 nouveau_vm_map(&node->chan_vma, node->vram);
348 gpuobj->linst = node->chan_vma.offset;
349 }
350
351 gpuobj->size = size;
352 gpuobj->node = node;
353 return 0;
354}
355
356void
357nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
358{
359 struct drm_device *dev = gpuobj->dev;
360 struct drm_nouveau_private *dev_priv = dev->dev_private;
361 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
362 struct nv50_gpuobj_node *node;
363
364 node = gpuobj->node;
365 gpuobj->node = NULL;
366
367 if (node->chan_vma.node) {
368 nouveau_vm_unmap(&node->chan_vma);
369 nouveau_vm_put(&node->chan_vma);
370 }
371 vram->put(dev, &node->vram);
372 kfree(node);
373}
374
375int
376nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
377{
378 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
379 struct nv50_gpuobj_node *node = gpuobj->node;
380 int ret;
381
382 ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
383 NV_MEM_ACCESS_RW, &node->vram->bar_vma);
384 if (ret)
385 return ret;
386
387 nouveau_vm_map(&node->vram->bar_vma, node->vram);
388 gpuobj->pinst = node->vram->bar_vma.offset;
389 return 0;
390}
391
392void
393nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
394{
395 struct nv50_gpuobj_node *node = gpuobj->node;
396
397 if (node->vram->bar_vma.node) {
398 nouveau_vm_unmap(&node->vram->bar_vma);
399 nouveau_vm_put(&node->vram->bar_vma);
400 }
401}
402
/* Flush pending PRAMIN writes on nv50 (pre-nv84 flush register). */
void
nv50_instmem_flush(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	/* kick the flush, then wait for the busy bit (bit 1) to clear */
	nv_wr32(dev, 0x00330c, 0x00000001);
	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
415
/* Same as nv50_instmem_flush(), but nv84+ moved the flush register
 * to 0x070000. */
void
nv84_instmem_flush(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	/* kick the flush, then wait for the busy bit (bit 1) to clear */
	nv_wr32(dev, 0x070000, 0x00000001);
	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
428
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
deleted file mode 100644
index e0a9c3faa202..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_mc.c
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
31int
32nv50_mc_init(struct drm_device *dev)
33{
34 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
35 return 0;
36}
37
void nv50_mc_takedown(struct drm_device *dev)
{
	/* Nothing to undo: init only sets PMC enable bits. */
}
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
deleted file mode 100644
index 90e8ed22cfcb..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ /dev/null
@@ -1,241 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_ramht.h"
28
/* PMPEG engine instance; currently just wraps the common exec-engine base. */
struct nv50_mpeg_engine {
	struct nouveau_exec_engine base;
};
32
33static inline u32
34CTX_PTR(struct drm_device *dev, u32 offset)
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37
38 if (dev_priv->chipset == 0x50)
39 offset += 0x0260;
40 else
41 offset += 0x0060;
42
43 return offset;
44}
45
/* Create a PMPEG context for @chan: allocates a 128-dword context
 * object and publishes its address in the channel's RAMIN header at
 * CTX_PTR().  Returns 0 or a negative errno from the allocation. */
static int
nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	/* context descriptor in the RAMIN header: flags, limit, base */
	nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
	nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
	nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
	nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
	nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
	nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);

	/* initial context values; meaning of the magics isn't visible here */
	nv_wo32(ctx, 0x70, 0x00801ec1);
	nv_wo32(ctx, 0x7c, 0x0000037c);
	dev_priv->engine.instmem.flush(dev);

	chan->engctx[engine] = ctx;
	return 0;
}
76
77static void
78nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
79{
80 struct nouveau_gpuobj *ctx = chan->engctx[engine];
81 struct drm_device *dev = chan->dev;
82 int i;
83
84 for (i = 0x00; i <= 0x14; i += 4)
85 nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
86
87 nouveau_gpuobj_ref(NULL, &ctx);
88 chan->engctx[engine] = NULL;
89}
90
/* Create a PMPEG engine object of @class and insert it into the
 * channel's RAMHT under @handle.  The local gpuobj reference is dropped
 * once RAMHT owns it, so the object lives as long as the hash entry. */
static int
nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
		     u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 2;
	obj->class = class;

	/* object header: class id followed by three zeroed dwords */
	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}
116
static void
nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
{
	/* PMPEG sits on VM engine slot 0x08 */
	nv50_vm_flush_engine(dev, 0x08);
}
122
/* Bring PMPEG out of reset and wait for it to report ready.  Most of
 * the magic values below are undocumented; the sequence is what the
 * hardware has been observed to need. */
static int
nv50_mpeg_init(struct drm_device *dev, int engine)
{
	nv_wr32(dev, 0x00b32c, 0x00000000);
	nv_wr32(dev, 0x00b314, 0x00000100);
	nv_wr32(dev, 0x00b0e0, 0x0000001a);

	nv_wr32(dev, 0x00b220, 0x00000044);
	nv_wr32(dev, 0x00b300, 0x00801ec1);
	nv_wr32(dev, 0x00b390, 0x00000000);
	nv_wr32(dev, 0x00b394, 0x00000000);
	nv_wr32(dev, 0x00b398, 0x00000000);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);

	/* ack any stale interrupts and unmask all of them */
	nv_wr32(dev, 0x00b100, 0xffffffff);
	nv_wr32(dev, 0x00b140, 0xffffffff);

	/* wait for bit 0 of 0x00b200 to drop (engine idle/ready) */
	if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
		return -EBUSY;
	}

	return 0;
}
147
148static int
149nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
150{
151 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
152 nv_wr32(dev, 0x00b140, 0x00000000);
153 return 0;
154}
155
/* PMPEG interrupt handler: silently completes the object-bind
 * interrupt, logs anything else (ratelimited), then acknowledges. */
static void
nv50_mpeg_isr(struct drm_device *dev)
{
	u32 stat = nv_rd32(dev, 0x00b100);
	u32 type = nv_rd32(dev, 0x00b230);
	u32 mthd = nv_rd32(dev, 0x00b234);
	u32 data = nv_rd32(dev, 0x00b238);
	u32 show = stat;

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			nv_wr32(dev, 0x00b308, 0x00000100);
			show &= ~0x01000000;
		}
	}

	if (show && nouveau_ratelimit()) {
		NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
			stat, type, mthd, data);
	}

	/* acknowledge the interrupt and resume processing */
	nv_wr32(dev, 0x00b100, stat);
	nv_wr32(dev, 0x00b230, 0x00000001);
	nv50_fb_vm_trap(dev, 1);
}
182
/* On the original nv50, IRQ line 0 is shared between PMPEG and PMSRCH;
 * demultiplex by status register.  NOTE(review): 0x00b800 is read twice
 * (test + value) — assumed side-effect free, confirm against hw docs. */
static void
nv50_vpe_isr(struct drm_device *dev)
{
	if (nv_rd32(dev, 0x00b100))
		nv50_mpeg_isr(dev);

	if (nv_rd32(dev, 0x00b800)) {
		u32 stat = nv_rd32(dev, 0x00b800);
		NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
		/* ack (0xb800 == 0x00b800) */
		nv_wr32(dev, 0xb800, stat);
	}
}
195
196static void
197nv50_mpeg_destroy(struct drm_device *dev, int engine)
198{
199 struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);
200
201 nouveau_irq_unregister(dev, 0);
202
203 NVOBJ_ENGINE_DEL(dev, MPEG);
204 kfree(pmpeg);
205}
206
207int
208nv50_mpeg_create(struct drm_device *dev)
209{
210 struct drm_nouveau_private *dev_priv = dev->dev_private;
211 struct nv50_mpeg_engine *pmpeg;
212
213 pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
214 if (!pmpeg)
215 return -ENOMEM;
216
217 pmpeg->base.destroy = nv50_mpeg_destroy;
218 pmpeg->base.init = nv50_mpeg_init;
219 pmpeg->base.fini = nv50_mpeg_fini;
220 pmpeg->base.context_new = nv50_mpeg_context_new;
221 pmpeg->base.context_del = nv50_mpeg_context_del;
222 pmpeg->base.object_new = nv50_mpeg_object_new;
223 pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;
224
225 if (dev_priv->chipset == 0x50) {
226 nouveau_irq_register(dev, 0, nv50_vpe_isr);
227 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
228 NVOBJ_CLASS(dev, 0x3174, MPEG);
229#if 0
230 NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
231 NVOBJ_CLASS(dev, 0x4075, ME);
232#endif
233 } else {
234 nouveau_irq_register(dev, 0, nv50_mpeg_isr);
235 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
236 NVOBJ_CLASS(dev, 0x8274, MPEG);
237 }
238
239 return 0;
240
241}
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index d020ed4979b4..7724eae5db98 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -23,13 +23,19 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drm.h"
27#include "nouveau_bios.h" 27#include "nouveau_bios.h"
28#include "nouveau_hw.h" 28#include "nouveau_hw.h"
29#include "nouveau_pm.h" 29#include "nouveau_pm.h"
30#include "nouveau_hwsq.h" 30#include "nouveau_hwsq.h"
31
31#include "nv50_display.h" 32#include "nv50_display.h"
32 33
34#include <subdev/bios/pll.h>
35#include <subdev/clock.h>
36#include <subdev/timer.h>
37#include <subdev/fb.h>
38
33enum clk_src { 39enum clk_src {
34 clk_src_crystal, 40 clk_src_crystal,
35 clk_src_href, 41 clk_src_href,
@@ -49,19 +55,20 @@ static u32 read_clk(struct drm_device *, enum clk_src);
49static u32 55static u32
50read_div(struct drm_device *dev) 56read_div(struct drm_device *dev)
51{ 57{
52 struct drm_nouveau_private *dev_priv = dev->dev_private; 58 struct nouveau_device *device = nouveau_dev(dev);
59 struct nouveau_drm *drm = nouveau_drm(dev);
53 60
54 switch (dev_priv->chipset) { 61 switch (nv_device(drm->device)->chipset) {
55 case 0x50: /* it exists, but only has bit 31, not the dividers.. */ 62 case 0x50: /* it exists, but only has bit 31, not the dividers.. */
56 case 0x84: 63 case 0x84:
57 case 0x86: 64 case 0x86:
58 case 0x98: 65 case 0x98:
59 case 0xa0: 66 case 0xa0:
60 return nv_rd32(dev, 0x004700); 67 return nv_rd32(device, 0x004700);
61 case 0x92: 68 case 0x92:
62 case 0x94: 69 case 0x94:
63 case 0x96: 70 case 0x96:
64 return nv_rd32(dev, 0x004800); 71 return nv_rd32(device, 0x004800);
65 default: 72 default:
66 return 0x00000000; 73 return 0x00000000;
67 } 74 }
@@ -70,12 +77,13 @@ read_div(struct drm_device *dev)
70static u32 77static u32
71read_pll_src(struct drm_device *dev, u32 base) 78read_pll_src(struct drm_device *dev, u32 base)
72{ 79{
73 struct drm_nouveau_private *dev_priv = dev->dev_private; 80 struct nouveau_device *device = nouveau_dev(dev);
81 struct nouveau_drm *drm = nouveau_drm(dev);
74 u32 coef, ref = read_clk(dev, clk_src_crystal); 82 u32 coef, ref = read_clk(dev, clk_src_crystal);
75 u32 rsel = nv_rd32(dev, 0x00e18c); 83 u32 rsel = nv_rd32(device, 0x00e18c);
76 int P, N, M, id; 84 int P, N, M, id;
77 85
78 switch (dev_priv->chipset) { 86 switch (nv_device(drm->device)->chipset) {
79 case 0x50: 87 case 0x50:
80 case 0xa0: 88 case 0xa0:
81 switch (base) { 89 switch (base) {
@@ -84,11 +92,11 @@ read_pll_src(struct drm_device *dev, u32 base)
84 case 0x4008: id = !!(rsel & 0x00000008); break; 92 case 0x4008: id = !!(rsel & 0x00000008); break;
85 case 0x4030: id = 0; break; 93 case 0x4030: id = 0; break;
86 default: 94 default:
87 NV_ERROR(dev, "ref: bad pll 0x%06x\n", base); 95 NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
88 return 0; 96 return 0;
89 } 97 }
90 98
91 coef = nv_rd32(dev, 0x00e81c + (id * 0x0c)); 99 coef = nv_rd32(device, 0x00e81c + (id * 0x0c));
92 ref *= (coef & 0x01000000) ? 2 : 4; 100 ref *= (coef & 0x01000000) ? 2 : 4;
93 P = (coef & 0x00070000) >> 16; 101 P = (coef & 0x00070000) >> 16;
94 N = ((coef & 0x0000ff00) >> 8) + 1; 102 N = ((coef & 0x0000ff00) >> 8) + 1;
@@ -97,7 +105,7 @@ read_pll_src(struct drm_device *dev, u32 base)
97 case 0x84: 105 case 0x84:
98 case 0x86: 106 case 0x86:
99 case 0x92: 107 case 0x92:
100 coef = nv_rd32(dev, 0x00e81c); 108 coef = nv_rd32(device, 0x00e81c);
101 P = (coef & 0x00070000) >> 16; 109 P = (coef & 0x00070000) >> 16;
102 N = (coef & 0x0000ff00) >> 8; 110 N = (coef & 0x0000ff00) >> 8;
103 M = (coef & 0x000000ff) >> 0; 111 M = (coef & 0x000000ff) >> 0;
@@ -105,14 +113,14 @@ read_pll_src(struct drm_device *dev, u32 base)
105 case 0x94: 113 case 0x94:
106 case 0x96: 114 case 0x96:
107 case 0x98: 115 case 0x98:
108 rsel = nv_rd32(dev, 0x00c050); 116 rsel = nv_rd32(device, 0x00c050);
109 switch (base) { 117 switch (base) {
110 case 0x4020: rsel = (rsel & 0x00000003) >> 0; break; 118 case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
111 case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break; 119 case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
112 case 0x4028: rsel = (rsel & 0x00001800) >> 11; break; 120 case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
113 case 0x4030: rsel = 3; break; 121 case 0x4030: rsel = 3; break;
114 default: 122 default:
115 NV_ERROR(dev, "ref: bad pll 0x%06x\n", base); 123 NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
116 return 0; 124 return 0;
117 } 125 }
118 126
@@ -123,8 +131,8 @@ read_pll_src(struct drm_device *dev, u32 base)
123 case 3: id = 0; break; 131 case 3: id = 0; break;
124 } 132 }
125 133
126 coef = nv_rd32(dev, 0x00e81c + (id * 0x28)); 134 coef = nv_rd32(device, 0x00e81c + (id * 0x28));
127 P = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7; 135 P = (nv_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
128 P += (coef & 0x00070000) >> 16; 136 P += (coef & 0x00070000) >> 16;
129 N = (coef & 0x0000ff00) >> 8; 137 N = (coef & 0x0000ff00) >> 8;
130 M = (coef & 0x000000ff) >> 0; 138 M = (coef & 0x000000ff) >> 0;
@@ -141,7 +149,9 @@ read_pll_src(struct drm_device *dev, u32 base)
141static u32 149static u32
142read_pll_ref(struct drm_device *dev, u32 base) 150read_pll_ref(struct drm_device *dev, u32 base)
143{ 151{
144 u32 src, mast = nv_rd32(dev, 0x00c040); 152 struct nouveau_device *device = nouveau_dev(dev);
153 struct nouveau_drm *drm = nouveau_drm(dev);
154 u32 src, mast = nv_rd32(device, 0x00c040);
145 155
146 switch (base) { 156 switch (base) {
147 case 0x004028: 157 case 0x004028:
@@ -159,7 +169,7 @@ read_pll_ref(struct drm_device *dev, u32 base)
159 case 0x00e810: 169 case 0x00e810:
160 return read_clk(dev, clk_src_crystal); 170 return read_clk(dev, clk_src_crystal);
161 default: 171 default:
162 NV_ERROR(dev, "bad pll 0x%06x\n", base); 172 NV_ERROR(drm, "bad pll 0x%06x\n", base);
163 return 0; 173 return 0;
164 } 174 }
165 175
@@ -171,17 +181,18 @@ read_pll_ref(struct drm_device *dev, u32 base)
171static u32 181static u32
172read_pll(struct drm_device *dev, u32 base) 182read_pll(struct drm_device *dev, u32 base)
173{ 183{
174 struct drm_nouveau_private *dev_priv = dev->dev_private; 184 struct nouveau_device *device = nouveau_dev(dev);
175 u32 mast = nv_rd32(dev, 0x00c040); 185 struct nouveau_drm *drm = nouveau_drm(dev);
176 u32 ctrl = nv_rd32(dev, base + 0); 186 u32 mast = nv_rd32(device, 0x00c040);
177 u32 coef = nv_rd32(dev, base + 4); 187 u32 ctrl = nv_rd32(device, base + 0);
188 u32 coef = nv_rd32(device, base + 4);
178 u32 ref = read_pll_ref(dev, base); 189 u32 ref = read_pll_ref(dev, base);
179 u32 clk = 0; 190 u32 clk = 0;
180 int N1, N2, M1, M2; 191 int N1, N2, M1, M2;
181 192
182 if (base == 0x004028 && (mast & 0x00100000)) { 193 if (base == 0x004028 && (mast & 0x00100000)) {
183 /* wtf, appears to only disable post-divider on nva0 */ 194 /* wtf, appears to only disable post-divider on nva0 */
184 if (dev_priv->chipset != 0xa0) 195 if (nv_device(drm->device)->chipset != 0xa0)
185 return read_clk(dev, clk_src_dom6); 196 return read_clk(dev, clk_src_dom6);
186 } 197 }
187 198
@@ -205,13 +216,14 @@ read_pll(struct drm_device *dev, u32 base)
205static u32 216static u32
206read_clk(struct drm_device *dev, enum clk_src src) 217read_clk(struct drm_device *dev, enum clk_src src)
207{ 218{
208 struct drm_nouveau_private *dev_priv = dev->dev_private; 219 struct nouveau_device *device = nouveau_dev(dev);
209 u32 mast = nv_rd32(dev, 0x00c040); 220 struct nouveau_drm *drm = nouveau_drm(dev);
221 u32 mast = nv_rd32(device, 0x00c040);
210 u32 P = 0; 222 u32 P = 0;
211 223
212 switch (src) { 224 switch (src) {
213 case clk_src_crystal: 225 case clk_src_crystal:
214 return dev_priv->crystal; 226 return device->crystal;
215 case clk_src_href: 227 case clk_src_href:
216 return 100000; /* PCIE reference clock */ 228 return 100000; /* PCIE reference clock */
217 case clk_src_hclk: 229 case clk_src_hclk:
@@ -230,7 +242,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
230 break; 242 break;
231 case clk_src_nvclk: 243 case clk_src_nvclk:
232 if (!(mast & 0x00100000)) 244 if (!(mast & 0x00100000))
233 P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16; 245 P = (nv_rd32(device, 0x004028) & 0x00070000) >> 16;
234 switch (mast & 0x00000003) { 246 switch (mast & 0x00000003) {
235 case 0x00000000: return read_clk(dev, clk_src_crystal) >> P; 247 case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
236 case 0x00000001: return read_clk(dev, clk_src_dom6); 248 case 0x00000001: return read_clk(dev, clk_src_dom6);
@@ -239,7 +251,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
239 } 251 }
240 break; 252 break;
241 case clk_src_sclk: 253 case clk_src_sclk:
242 P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16; 254 P = (nv_rd32(device, 0x004020) & 0x00070000) >> 16;
243 switch (mast & 0x00000030) { 255 switch (mast & 0x00000030) {
244 case 0x00000000: 256 case 0x00000000:
245 if (mast & 0x00000080) 257 if (mast & 0x00000080)
@@ -251,8 +263,8 @@ read_clk(struct drm_device *dev, enum clk_src src)
251 } 263 }
252 break; 264 break;
253 case clk_src_mclk: 265 case clk_src_mclk:
254 P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16; 266 P = (nv_rd32(device, 0x004008) & 0x00070000) >> 16;
255 if (nv_rd32(dev, 0x004008) & 0x00000200) { 267 if (nv_rd32(device, 0x004008) & 0x00000200) {
256 switch (mast & 0x0000c000) { 268 switch (mast & 0x0000c000) {
257 case 0x00000000: 269 case 0x00000000:
258 return read_clk(dev, clk_src_crystal) >> P; 270 return read_clk(dev, clk_src_crystal) >> P;
@@ -266,7 +278,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
266 break; 278 break;
267 case clk_src_vdec: 279 case clk_src_vdec:
268 P = (read_div(dev) & 0x00000700) >> 8; 280 P = (read_div(dev) & 0x00000700) >> 8;
269 switch (dev_priv->chipset) { 281 switch (nv_device(drm->device)->chipset) {
270 case 0x84: 282 case 0x84:
271 case 0x86: 283 case 0x86:
272 case 0x92: 284 case 0x92:
@@ -275,7 +287,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
275 case 0xa0: 287 case 0xa0:
276 switch (mast & 0x00000c00) { 288 switch (mast & 0x00000c00) {
277 case 0x00000000: 289 case 0x00000000:
278 if (dev_priv->chipset == 0xa0) /* wtf?? */ 290 if (nv_device(drm->device)->chipset == 0xa0) /* wtf?? */
279 return read_clk(dev, clk_src_nvclk) >> P; 291 return read_clk(dev, clk_src_nvclk) >> P;
280 return read_clk(dev, clk_src_crystal) >> P; 292 return read_clk(dev, clk_src_crystal) >> P;
281 case 0x00000400: 293 case 0x00000400:
@@ -303,7 +315,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
303 } 315 }
304 break; 316 break;
305 case clk_src_dom6: 317 case clk_src_dom6:
306 switch (dev_priv->chipset) { 318 switch (nv_device(drm->device)->chipset) {
307 case 0x50: 319 case 0x50:
308 case 0xa0: 320 case 0xa0:
309 return read_pll(dev, 0x00e810) >> 2; 321 return read_pll(dev, 0x00e810) >> 2;
@@ -329,22 +341,22 @@ read_clk(struct drm_device *dev, enum clk_src src)
329 break; 341 break;
330 } 342 }
331 343
332 NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast); 344 NV_DEBUG(drm, "unknown clock source %d 0x%08x\n", src, mast);
333 return 0; 345 return 0;
334} 346}
335 347
336int 348int
337nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) 349nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
338{ 350{
339 struct drm_nouveau_private *dev_priv = dev->dev_private; 351 struct nouveau_drm *drm = nouveau_drm(dev);
340 if (dev_priv->chipset == 0xaa || 352 if (nv_device(drm->device)->chipset == 0xaa ||
341 dev_priv->chipset == 0xac) 353 nv_device(drm->device)->chipset == 0xac)
342 return 0; 354 return 0;
343 355
344 perflvl->core = read_clk(dev, clk_src_nvclk); 356 perflvl->core = read_clk(dev, clk_src_nvclk);
345 perflvl->shader = read_clk(dev, clk_src_sclk); 357 perflvl->shader = read_clk(dev, clk_src_sclk);
346 perflvl->memory = read_clk(dev, clk_src_mclk); 358 perflvl->memory = read_clk(dev, clk_src_mclk);
347 if (dev_priv->chipset != 0x50) { 359 if (nv_device(drm->device)->chipset != 0x50) {
348 perflvl->vdec = read_clk(dev, clk_src_vdec); 360 perflvl->vdec = read_clk(dev, clk_src_vdec);
349 perflvl->dom6 = read_clk(dev, clk_src_dom6); 361 perflvl->dom6 = read_clk(dev, clk_src_dom6);
350 } 362 }
@@ -363,22 +375,25 @@ struct nv50_pm_state {
363}; 375};
364 376
365static u32 377static u32
366calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll, 378calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
367 u32 clk, int *N1, int *M1, int *log2P) 379 u32 clk, int *N1, int *M1, int *log2P)
368{ 380{
381 struct nouveau_device *device = nouveau_dev(dev);
382 struct nouveau_bios *bios = nouveau_bios(device);
383 struct nouveau_clock *pclk = nouveau_clock(device);
369 struct nouveau_pll_vals coef; 384 struct nouveau_pll_vals coef;
370 int ret; 385 int ret;
371 386
372 ret = get_pll_limits(dev, reg, pll); 387 ret = nvbios_pll_parse(bios, reg, pll);
373 if (ret) 388 if (ret)
374 return 0; 389 return 0;
375 390
376 pll->vco2.maxfreq = 0; 391 pll->vco2.max_freq = 0;
377 pll->refclk = read_pll_ref(dev, reg); 392 pll->refclk = read_pll_ref(dev, reg);
378 if (!pll->refclk) 393 if (!pll->refclk)
379 return 0; 394 return 0;
380 395
381 ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef); 396 ret = pclk->pll_calc(pclk, pll, clk, &coef);
382 if (ret == 0) 397 if (ret == 0)
383 return 0; 398 return 0;
384 399
@@ -461,27 +476,29 @@ mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
461static u32 476static u32
462mclk_mrg(struct nouveau_mem_exec_func *exec, int mr) 477mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
463{ 478{
479 struct nouveau_device *device = nouveau_dev(exec->dev);
464 if (mr <= 1) 480 if (mr <= 1)
465 return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4)); 481 return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
466 if (mr <= 3) 482 if (mr <= 3)
467 return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4)); 483 return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
468 return 0; 484 return 0;
469} 485}
470 486
471static void 487static void
472mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data) 488mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
473{ 489{
474 struct drm_nouveau_private *dev_priv = exec->dev->dev_private; 490 struct nouveau_device *device = nouveau_dev(exec->dev);
491 struct nouveau_fb *pfb = nouveau_fb(device);
475 struct nv50_pm_state *info = exec->priv; 492 struct nv50_pm_state *info = exec->priv;
476 struct hwsq_ucode *hwsq = &info->mclk_hwsq; 493 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
477 494
478 if (mr <= 1) { 495 if (mr <= 1) {
479 if (dev_priv->vram_rank_B) 496 if (pfb->ram.ranks > 1)
480 hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data); 497 hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
481 hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data); 498 hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
482 } else 499 } else
483 if (mr <= 3) { 500 if (mr <= 3) {
484 if (dev_priv->vram_rank_B) 501 if (pfb->ram.ranks > 1)
485 hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data); 502 hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
486 hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data); 503 hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
487 } 504 }
@@ -490,11 +507,12 @@ mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
490static void 507static void
491mclk_clock_set(struct nouveau_mem_exec_func *exec) 508mclk_clock_set(struct nouveau_mem_exec_func *exec)
492{ 509{
510 struct nouveau_device *device = nouveau_dev(exec->dev);
493 struct nv50_pm_state *info = exec->priv; 511 struct nv50_pm_state *info = exec->priv;
494 struct hwsq_ucode *hwsq = &info->mclk_hwsq; 512 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
495 u32 ctrl = nv_rd32(exec->dev, 0x004008); 513 u32 ctrl = nv_rd32(device, 0x004008);
496 514
497 info->mmast = nv_rd32(exec->dev, 0x00c040); 515 info->mmast = nv_rd32(device, 0x00c040);
498 info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */ 516 info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
499 info->mmast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */ 517 info->mmast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
500 518
@@ -508,7 +526,7 @@ mclk_clock_set(struct nouveau_mem_exec_func *exec)
508static void 526static void
509mclk_timing_set(struct nouveau_mem_exec_func *exec) 527mclk_timing_set(struct nouveau_mem_exec_func *exec)
510{ 528{
511 struct drm_device *dev = exec->dev; 529 struct nouveau_device *device = nouveau_dev(exec->dev);
512 struct nv50_pm_state *info = exec->priv; 530 struct nv50_pm_state *info = exec->priv;
513 struct nouveau_pm_level *perflvl = info->perflvl; 531 struct nouveau_pm_level *perflvl = info->perflvl;
514 struct hwsq_ucode *hwsq = &info->mclk_hwsq; 532 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
@@ -516,7 +534,7 @@ mclk_timing_set(struct nouveau_mem_exec_func *exec)
516 534
517 for (i = 0; i < 9; i++) { 535 for (i = 0; i < 9; i++) {
518 u32 reg = 0x100220 + (i * 4); 536 u32 reg = 0x100220 + (i * 4);
519 u32 val = nv_rd32(dev, reg); 537 u32 val = nv_rd32(device, reg);
520 if (val != perflvl->timing.reg[i]) 538 if (val != perflvl->timing.reg[i])
521 hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]); 539 hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
522 } 540 }
@@ -526,7 +544,8 @@ static int
526calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl, 544calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
527 struct nv50_pm_state *info) 545 struct nv50_pm_state *info)
528{ 546{
529 struct drm_nouveau_private *dev_priv = dev->dev_private; 547 struct nouveau_drm *drm = nouveau_drm(dev);
548 struct nouveau_device *device = nouveau_dev(dev);
530 u32 crtc_mask = nv50_display_active_crtcs(dev); 549 u32 crtc_mask = nv50_display_active_crtcs(dev);
531 struct nouveau_mem_exec_func exec = { 550 struct nouveau_mem_exec_func exec = {
532 .dev = dev, 551 .dev = dev,
@@ -542,22 +561,22 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
542 .priv = info 561 .priv = info
543 }; 562 };
544 struct hwsq_ucode *hwsq = &info->mclk_hwsq; 563 struct hwsq_ucode *hwsq = &info->mclk_hwsq;
545 struct pll_lims pll; 564 struct nvbios_pll pll;
546 int N, M, P; 565 int N, M, P;
547 int ret; 566 int ret;
548 567
549 /* use pcie refclock if possible, otherwise use mpll */ 568 /* use pcie refclock if possible, otherwise use mpll */
550 info->mctrl = nv_rd32(dev, 0x004008); 569 info->mctrl = nv_rd32(device, 0x004008);
551 info->mctrl &= ~0x81ff0200; 570 info->mctrl &= ~0x81ff0200;
552 if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) { 571 if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
553 info->mctrl |= 0x00000200 | (pll.log2p_bias << 19); 572 info->mctrl |= 0x00000200 | (pll.bias_p << 19);
554 } else { 573 } else {
555 ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P); 574 ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
556 if (ret == 0) 575 if (ret == 0)
557 return -EINVAL; 576 return -EINVAL;
558 577
559 info->mctrl |= 0x80000000 | (P << 22) | (P << 16); 578 info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
560 info->mctrl |= pll.log2p_bias << 19; 579 info->mctrl |= pll.bias_p << 19;
561 info->mcoef = (N << 8) | M; 580 info->mcoef = (N << 8) | M;
562 } 581 }
563 582
@@ -567,7 +586,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
567 hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */ 586 hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
568 hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */ 587 hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
569 } 588 }
570 if (dev_priv->chipset >= 0x92) 589 if (nv_device(drm->device)->chipset >= 0x92)
571 hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */ 590 hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
572 hwsq_setf(hwsq, 0x10, 0); /* disable bus access */ 591 hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
573 hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */ 592 hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
@@ -578,7 +597,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
578 597
579 hwsq_setf(hwsq, 0x10, 1); /* enable bus access */ 598 hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
580 hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */ 599 hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
581 if (dev_priv->chipset >= 0x92) 600 if (nv_device(drm->device)->chipset >= 0x92)
582 hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */ 601 hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
583 hwsq_fini(hwsq); 602 hwsq_fini(hwsq);
584 return 0; 603 return 0;
@@ -587,16 +606,17 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
587void * 606void *
588nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) 607nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
589{ 608{
590 struct drm_nouveau_private *dev_priv = dev->dev_private; 609 struct nouveau_device *device = nouveau_dev(dev);
610 struct nouveau_drm *drm = nouveau_drm(dev);
591 struct nv50_pm_state *info; 611 struct nv50_pm_state *info;
592 struct hwsq_ucode *hwsq; 612 struct hwsq_ucode *hwsq;
593 struct pll_lims pll; 613 struct nvbios_pll pll;
594 u32 out, mast, divs, ctrl; 614 u32 out, mast, divs, ctrl;
595 int clk, ret = -EINVAL; 615 int clk, ret = -EINVAL;
596 int N, M, P1, P2; 616 int N, M, P1, P2;
597 617
598 if (dev_priv->chipset == 0xaa || 618 if (nv_device(drm->device)->chipset == 0xaa ||
599 dev_priv->chipset == 0xac) 619 nv_device(drm->device)->chipset == 0xac)
600 return ERR_PTR(-ENODEV); 620 return ERR_PTR(-ENODEV);
601 621
602 info = kmalloc(sizeof(*info), GFP_KERNEL); 622 info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -645,7 +665,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
645 clk = calc_div(perflvl->core, perflvl->vdec, &P1); 665 clk = calc_div(perflvl->core, perflvl->vdec, &P1);
646 666
647 /* see how close we can get using xpll/hclk as a source */ 667 /* see how close we can get using xpll/hclk as a source */
648 if (dev_priv->chipset != 0x98) 668 if (nv_device(drm->device)->chipset != 0x98)
649 out = read_pll(dev, 0x004030); 669 out = read_pll(dev, 0x004030);
650 else 670 else
651 out = read_clk(dev, clk_src_hclkm3d2); 671 out = read_clk(dev, clk_src_hclkm3d2);
@@ -654,7 +674,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
654 /* select whichever gets us closest */ 674 /* select whichever gets us closest */
655 if (abs((int)perflvl->vdec - clk) <= 675 if (abs((int)perflvl->vdec - clk) <=
656 abs((int)perflvl->vdec - out)) { 676 abs((int)perflvl->vdec - out)) {
657 if (dev_priv->chipset != 0x98) 677 if (nv_device(drm->device)->chipset != 0x98)
658 mast |= 0x00000c00; 678 mast |= 0x00000c00;
659 divs |= P1 << 8; 679 divs |= P1 << 8;
660 } else { 680 } else {
@@ -682,7 +702,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
682 } 702 }
683 703
684 /* vdec/dom6: complete switch to new clocks */ 704 /* vdec/dom6: complete switch to new clocks */
685 switch (dev_priv->chipset) { 705 switch (nv_device(drm->device)->chipset) {
686 case 0x92: 706 case 0x92:
687 case 0x94: 707 case 0x94:
688 case 0x96: 708 case 0x96:
@@ -698,7 +718,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
698 /* core/shader: make sure sclk/nvclk are disconnected from their 718 /* core/shader: make sure sclk/nvclk are disconnected from their
699 * PLLs (nvclk to dom6, sclk to hclk) 719 * PLLs (nvclk to dom6, sclk to hclk)
700 */ 720 */
701 if (dev_priv->chipset < 0x92) 721 if (nv_device(drm->device)->chipset < 0x92)
702 mast = (mast & ~0x001000b0) | 0x00100080; 722 mast = (mast & ~0x001000b0) | 0x00100080;
703 else 723 else
704 mast = (mast & ~0x000000b3) | 0x00000081; 724 mast = (mast & ~0x000000b3) | 0x00000081;
@@ -710,7 +730,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
710 if (clk == 0) 730 if (clk == 0)
711 goto error; 731 goto error;
712 732
713 ctrl = nv_rd32(dev, 0x004028) & ~0xc03f0100; 733 ctrl = nv_rd32(device, 0x004028) & ~0xc03f0100;
714 mast &= ~0x00100000; 734 mast &= ~0x00100000;
715 mast |= 3; 735 mast |= 3;
716 736
@@ -723,7 +743,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
723 * cases will be handled by tying to nvclk, but it's possible there's 743 * cases will be handled by tying to nvclk, but it's possible there's
724 * corners 744 * corners
725 */ 745 */
726 ctrl = nv_rd32(dev, 0x004020) & ~0xc03f0100; 746 ctrl = nv_rd32(device, 0x004020) & ~0xc03f0100;
727 747
728 if (P1-- && perflvl->shader == (perflvl->core << 1)) { 748 if (P1-- && perflvl->shader == (perflvl->core << 1)) {
729 hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl); 749 hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
@@ -752,11 +772,12 @@ error:
752static int 772static int
753prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq) 773prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
754{ 774{
755 struct drm_nouveau_private *dev_priv = dev->dev_private; 775 struct nouveau_device *device = nouveau_dev(dev);
776 struct nouveau_drm *drm = nouveau_drm(dev);
756 u32 hwsq_data, hwsq_kick; 777 u32 hwsq_data, hwsq_kick;
757 int i; 778 int i;
758 779
759 if (dev_priv->chipset < 0x94) { 780 if (nv_device(drm->device)->chipset < 0x94) {
760 hwsq_data = 0x001400; 781 hwsq_data = 0x001400;
761 hwsq_kick = 0x00000003; 782 hwsq_kick = 0x00000003;
762 } else { 783 } else {
@@ -764,22 +785,22 @@ prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
764 hwsq_kick = 0x00000001; 785 hwsq_kick = 0x00000001;
765 } 786 }
766 /* upload hwsq ucode */ 787 /* upload hwsq ucode */
767 nv_mask(dev, 0x001098, 0x00000008, 0x00000000); 788 nv_mask(device, 0x001098, 0x00000008, 0x00000000);
768 nv_wr32(dev, 0x001304, 0x00000000); 789 nv_wr32(device, 0x001304, 0x00000000);
769 if (dev_priv->chipset >= 0x92) 790 if (nv_device(drm->device)->chipset >= 0x92)
770 nv_wr32(dev, 0x001318, 0x00000000); 791 nv_wr32(device, 0x001318, 0x00000000);
771 for (i = 0; i < hwsq->len / 4; i++) 792 for (i = 0; i < hwsq->len / 4; i++)
772 nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]); 793 nv_wr32(device, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
773 nv_mask(dev, 0x001098, 0x00000018, 0x00000018); 794 nv_mask(device, 0x001098, 0x00000018, 0x00000018);
774 795
775 /* launch, and wait for completion */ 796 /* launch, and wait for completion */
776 nv_wr32(dev, 0x00130c, hwsq_kick); 797 nv_wr32(device, 0x00130c, hwsq_kick);
777 if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) { 798 if (!nv_wait(device, 0x001308, 0x00000100, 0x00000000)) {
778 NV_ERROR(dev, "hwsq ucode exec timed out\n"); 799 NV_ERROR(drm, "hwsq ucode exec timed out\n");
779 NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308)); 800 NV_ERROR(drm, "0x001308: 0x%08x\n", nv_rd32(device, 0x001308));
780 for (i = 0; i < hwsq->len / 4; i++) { 801 for (i = 0; i < hwsq->len / 4; i++) {
781 NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4), 802 NV_ERROR(drm, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
782 nv_rd32(dev, 0x001400 + (i * 4))); 803 nv_rd32(device, 0x001400 + (i * 4)));
783 } 804 }
784 805
785 return -EIO; 806 return -EIO;
@@ -791,20 +812,22 @@ prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
791int 812int
792nv50_pm_clocks_set(struct drm_device *dev, void *data) 813nv50_pm_clocks_set(struct drm_device *dev, void *data)
793{ 814{
815 struct nouveau_device *device = nouveau_dev(dev);
794 struct nv50_pm_state *info = data; 816 struct nv50_pm_state *info = data;
795 struct bit_entry M; 817 struct bit_entry M;
796 int ret = -EBUSY; 818 int ret = -EBUSY;
797 819
798 /* halt and idle execution engines */ 820 /* halt and idle execution engines */
799 nv_mask(dev, 0x002504, 0x00000001, 0x00000001); 821 nv_mask(device, 0x002504, 0x00000001, 0x00000001);
800 if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) 822 if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010))
801 goto resume; 823 goto resume;
802 if (!nv_wait(dev, 0x00251c, 0x0000003f, 0x0000003f)) 824 if (!nv_wait(device, 0x00251c, 0x0000003f, 0x0000003f))
803 goto resume; 825 goto resume;
804 826
805 /* program memory clock, if necessary - must come before engine clock 827 /* program memory clock, if necessary - must come before engine clock
806 * reprogramming due to how we construct the hwsq scripts in pre() 828 * reprogramming due to how we construct the hwsq scripts in pre()
807 */ 829 */
830#define nouveau_bios_init_exec(a,b) nouveau_bios_run_init_table((a), (b), NULL, 0)
808 if (info->mclk_hwsq.len) { 831 if (info->mclk_hwsq.len) {
809 /* execute some scripts that do ??? from the vbios.. */ 832 /* execute some scripts that do ??? from the vbios.. */
810 if (!bit_table(dev, 'M', &M) && M.version == 1) { 833 if (!bit_table(dev, 'M', &M) && M.version == 1) {
@@ -826,61 +849,7 @@ nv50_pm_clocks_set(struct drm_device *dev, void *data)
826 ret = prog_hwsq(dev, &info->eclk_hwsq); 849 ret = prog_hwsq(dev, &info->eclk_hwsq);
827 850
828resume: 851resume:
829 nv_mask(dev, 0x002504, 0x00000001, 0x00000000); 852 nv_mask(device, 0x002504, 0x00000001, 0x00000000);
830 kfree(info); 853 kfree(info);
831 return ret; 854 return ret;
832} 855}
833
834static int
835pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
836{
837 if (*line == 0x04) {
838 *ctrl = 0x00e100;
839 *line = 4;
840 *indx = 0;
841 } else
842 if (*line == 0x09) {
843 *ctrl = 0x00e100;
844 *line = 9;
845 *indx = 1;
846 } else
847 if (*line == 0x10) {
848 *ctrl = 0x00e28c;
849 *line = 0;
850 *indx = 0;
851 } else {
852 NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
853 return -ENODEV;
854 }
855
856 return 0;
857}
858
859int
860nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
861{
862 int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
863 if (ret)
864 return ret;
865
866 if (nv_rd32(dev, ctrl) & (1 << line)) {
867 *divs = nv_rd32(dev, 0x00e114 + (id * 8));
868 *duty = nv_rd32(dev, 0x00e118 + (id * 8));
869 return 0;
870 }
871
872 return -EINVAL;
873}
874
875int
876nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
877{
878 int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
879 if (ret)
880 return ret;
881
882 nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
883 nv_wr32(dev, 0x00e114 + (id * 8), divs);
884 nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
885 return 0;
886}
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
deleted file mode 100644
index df554d9dacb8..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_software.c
+++ /dev/null
@@ -1,203 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29#include "nouveau_software.h"
30
31#include "nv50_display.h"
32
33struct nv50_software_priv {
34 struct nouveau_software_priv base;
35};
36
37struct nv50_software_chan {
38 struct nouveau_software_chan base;
39};
40
41static int
42mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
43{
44 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
45 struct nouveau_gpuobj *gpuobj;
46
47 gpuobj = nouveau_ramht_find(chan, data);
48 if (!gpuobj)
49 return -ENOENT;
50
51 pch->base.vblank.ctxdma = gpuobj->cinst >> 4;
52 return 0;
53}
54
55static int
56mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
57{
58 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
59 pch->base.vblank.offset = data;
60 return 0;
61}
62
63static int
64mthd_vblsem_value(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
65{
66 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
67 pch->base.vblank.value = data;
68 return 0;
69}
70
71static int
72mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
73{
74 struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
75 struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
76 struct drm_device *dev = chan->dev;
77
78 if (data > 1)
79 return -EINVAL;
80
81 drm_vblank_get(dev, data);
82
83 pch->base.vblank.head = data;
84 list_add(&pch->base.vblank.list, &psw->base.vblank);
85 return 0;
86}
87
88static int
89mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
90{
91 nouveau_finish_page_flip(chan, NULL);
92 return 0;
93}
94
95static int
96nv50_software_context_new(struct nouveau_channel *chan, int engine)
97{
98 struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
99 struct nv50_display *pdisp = nv50_display(chan->dev);
100 struct nv50_software_chan *pch;
101 int ret = 0, i;
102
103 pch = kzalloc(sizeof(*pch), GFP_KERNEL);
104 if (!pch)
105 return -ENOMEM;
106
107 nouveau_software_context_new(&pch->base);
108 pch->base.vblank.channel = chan->ramin->vinst >> 12;
109 chan->engctx[engine] = pch;
110
111 /* dma objects for display sync channel semaphore blocks */
112 for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
113 struct nv50_display_crtc *dispc = &pdisp->crtc[i];
114 struct nouveau_gpuobj *obj = NULL;
115
116 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
117 dispc->sem.bo->bo.offset, 0x1000,
118 NV_MEM_ACCESS_RW,
119 NV_MEM_TARGET_VRAM, &obj);
120 if (ret)
121 break;
122
123 ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
124 nouveau_gpuobj_ref(NULL, &obj);
125 }
126
127 if (ret)
128 psw->base.base.context_del(chan, engine);
129 return ret;
130}
131
132static void
133nv50_software_context_del(struct nouveau_channel *chan, int engine)
134{
135 struct nv50_software_chan *pch = chan->engctx[engine];
136 chan->engctx[engine] = NULL;
137 kfree(pch);
138}
139
140static int
141nv50_software_object_new(struct nouveau_channel *chan, int engine,
142 u32 handle, u16 class)
143{
144 struct drm_device *dev = chan->dev;
145 struct nouveau_gpuobj *obj = NULL;
146 int ret;
147
148 ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
149 if (ret)
150 return ret;
151 obj->engine = 0;
152 obj->class = class;
153
154 ret = nouveau_ramht_insert(chan, handle, obj);
155 nouveau_gpuobj_ref(NULL, &obj);
156 return ret;
157}
158
159static int
160nv50_software_init(struct drm_device *dev, int engine)
161{
162 return 0;
163}
164
165static int
166nv50_software_fini(struct drm_device *dev, int engine, bool suspend)
167{
168 return 0;
169}
170
171static void
172nv50_software_destroy(struct drm_device *dev, int engine)
173{
174 struct nv50_software_priv *psw = nv_engine(dev, engine);
175
176 NVOBJ_ENGINE_DEL(dev, SW);
177 kfree(psw);
178}
179
180int
181nv50_software_create(struct drm_device *dev)
182{
183 struct nv50_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
184 if (!psw)
185 return -ENOMEM;
186
187 psw->base.base.destroy = nv50_software_destroy;
188 psw->base.base.init = nv50_software_init;
189 psw->base.base.fini = nv50_software_fini;
190 psw->base.base.context_new = nv50_software_context_new;
191 psw->base.base.context_del = nv50_software_context_del;
192 psw->base.base.object_new = nv50_software_object_new;
193 nouveau_software_create(&psw->base);
194
195 NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
196 NVOBJ_CLASS(dev, 0x506e, SW);
197 NVOBJ_MTHD (dev, 0x506e, 0x018c, mthd_dma_vblsem);
198 NVOBJ_MTHD (dev, 0x506e, 0x0400, mthd_vblsem_offset);
199 NVOBJ_MTHD (dev, 0x506e, 0x0404, mthd_vblsem_value);
200 NVOBJ_MTHD (dev, 0x506e, 0x0408, mthd_vblsem_release);
201 NVOBJ_MTHD (dev, 0x506e, 0x0500, mthd_flip);
202 return 0;
203}
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 93240bde891b..48644e379e86 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -29,35 +29,40 @@
29 29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) 30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h" 31#include "nouveau_reg.h"
32#include "nouveau_drv.h" 32#include "nouveau_drm.h"
33#include "nouveau_dma.h" 33#include "nouveau_dma.h"
34#include "nouveau_encoder.h" 34#include "nouveau_encoder.h"
35#include "nouveau_connector.h" 35#include "nouveau_connector.h"
36#include "nouveau_crtc.h" 36#include "nouveau_crtc.h"
37#include "nv50_display.h" 37#include "nv50_display.h"
38 38
39#include <subdev/timer.h>
40
39static u32 41static u32
40nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane) 42nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
41{ 43{
42 struct drm_nouveau_private *dev_priv = dev->dev_private; 44 struct nouveau_drm *drm = nouveau_drm(dev);
43 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ 45 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
44 static const u8 nv50[] = { 16, 8, 0, 24 }; 46 static const u8 nv50[] = { 16, 8, 0, 24 };
45 if (dev_priv->chipset == 0xaf) 47 if (nv_device(drm->device)->chipset == 0xaf)
46 return nvaf[lane]; 48 return nvaf[lane];
47 return nv50[lane]; 49 return nv50[lane];
48} 50}
49 51
50static void 52static void
51nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern) 53nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
52{ 54{
55 struct nouveau_device *device = nouveau_dev(dev);
53 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); 56 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
54 nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24); 57 nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
55} 58}
56 59
57static void 60static void
58nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb, 61nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
59 u8 lane, u8 swing, u8 preem) 62 u8 lane, u8 swing, u8 preem)
60{ 63{
64 struct nouveau_device *device = nouveau_dev(dev);
65 struct nouveau_drm *drm = nouveau_drm(dev);
61 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); 66 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
62 u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane); 67 u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
63 u32 mask = 0x000000ff << shift; 68 u32 mask = 0x000000ff << shift;
@@ -65,7 +70,7 @@ nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
65 70
66 table = nouveau_dp_bios_data(dev, dcb, &entry); 71 table = nouveau_dp_bios_data(dev, dcb, &entry);
67 if (!table || (table[0] != 0x20 && table[0] != 0x21)) { 72 if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
68 NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n"); 73 NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
69 return; 74 return;
70 } 75 }
71 76
@@ -76,24 +81,26 @@ nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
76 return; 81 return;
77 } 82 }
78 83
79 nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift); 84 nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
80 nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift); 85 nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
81 nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8); 86 nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
82} 87}
83 88
84static void 89static void
85nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc, 90nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
86 int link_nr, u32 link_bw, bool enhframe) 91 int link_nr, u32 link_bw, bool enhframe)
87{ 92{
93 struct nouveau_device *device = nouveau_dev(dev);
94 struct nouveau_drm *drm = nouveau_drm(dev);
88 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); 95 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
89 u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000; 96 u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
90 u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800)) & ~0x000c0000; 97 u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
91 u8 *table, *entry, mask; 98 u8 *table, *entry, mask;
92 int i; 99 int i;
93 100
94 table = nouveau_dp_bios_data(dev, dcb, &entry); 101 table = nouveau_dp_bios_data(dev, dcb, &entry);
95 if (!table || (table[0] != 0x20 && table[0] != 0x21)) { 102 if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
96 NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n"); 103 NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
97 return; 104 return;
98 } 105 }
99 106
@@ -112,20 +119,21 @@ nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
112 if (link_bw > 162000) 119 if (link_bw > 162000)
113 clksor |= 0x00040000; 120 clksor |= 0x00040000;
114 121
115 nv_wr32(dev, 0x614300 + (or * 0x800), clksor); 122 nv_wr32(device, 0x614300 + (or * 0x800), clksor);
116 nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), dpctrl); 123 nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
117 124
118 mask = 0; 125 mask = 0;
119 for (i = 0; i < link_nr; i++) 126 for (i = 0; i < link_nr; i++)
120 mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3); 127 mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
121 nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask); 128 nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
122} 129}
123 130
124static void 131static void
125nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw) 132nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
126{ 133{
127 u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000; 134 struct nouveau_device *device = nouveau_dev(dev);
128 u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800)); 135 u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
136 u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
129 if (clksor & 0x000c0000) 137 if (clksor & 0x000c0000)
130 *bw = 270000; 138 *bw = 270000;
131 else 139 else
@@ -139,6 +147,8 @@ nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
139void 147void
140nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp) 148nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
141{ 149{
150 struct nouveau_device *device = nouveau_dev(dev);
151 struct nouveau_drm *drm = nouveau_drm(dev);
142 const u32 symbol = 100000; 152 const u32 symbol = 100000;
143 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0; 153 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
144 int TU, VTUi, VTUf, VTUa; 154 int TU, VTUi, VTUf, VTUa;
@@ -206,7 +216,7 @@ nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
206 } 216 }
207 217
208 if (!bestTU) { 218 if (!bestTU) {
209 NV_ERROR(dev, "DP: unable to find suitable config\n"); 219 NV_ERROR(drm, "DP: unable to find suitable config\n");
210 return; 220 return;
211 } 221 }
212 222
@@ -217,8 +227,8 @@ nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
217 r = do_div(unk, symbol); 227 r = do_div(unk, symbol);
218 unk += 6; 228 unk += 6;
219 229
220 nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2); 230 nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
221 nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 | 231 nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
222 bestVTUf << 16 | 232 bestVTUf << 16 |
223 bestVTUi << 8 | 233 bestVTUi << 8 |
224 unk); 234 unk);
@@ -227,6 +237,7 @@ static void
227nv50_sor_disconnect(struct drm_encoder *encoder) 237nv50_sor_disconnect(struct drm_encoder *encoder)
228{ 238{
229 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 239 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
240 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
230 struct drm_device *dev = encoder->dev; 241 struct drm_device *dev = encoder->dev;
231 struct nouveau_channel *evo = nv50_display(dev)->master; 242 struct nouveau_channel *evo = nv50_display(dev)->master;
232 int ret; 243 int ret;
@@ -235,11 +246,11 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
235 return; 246 return;
236 nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); 247 nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
237 248
238 NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or); 249 NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
239 250
240 ret = RING_SPACE(evo, 4); 251 ret = RING_SPACE(evo, 4);
241 if (ret) { 252 if (ret) {
242 NV_ERROR(dev, "no space while disconnecting SOR\n"); 253 NV_ERROR(drm, "no space while disconnecting SOR\n");
243 return; 254 return;
244 } 255 }
245 BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); 256 BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
@@ -256,22 +267,24 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
256static void 267static void
257nv50_sor_dpms(struct drm_encoder *encoder, int mode) 268nv50_sor_dpms(struct drm_encoder *encoder, int mode)
258{ 269{
270 struct nouveau_device *device = nouveau_dev(encoder->dev);
271 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
259 struct drm_device *dev = encoder->dev; 272 struct drm_device *dev = encoder->dev;
260 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 273 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
261 struct drm_encoder *enc; 274 struct drm_encoder *enc;
262 uint32_t val; 275 uint32_t val;
263 int or = nv_encoder->or; 276 int or = nv_encoder->or;
264 277
265 NV_DEBUG_KMS(dev, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode); 278 NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
266 279
267 nv_encoder->last_dpms = mode; 280 nv_encoder->last_dpms = mode;
268 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { 281 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
269 struct nouveau_encoder *nvenc = nouveau_encoder(enc); 282 struct nouveau_encoder *nvenc = nouveau_encoder(enc);
270 283
271 if (nvenc == nv_encoder || 284 if (nvenc == nv_encoder ||
272 (nvenc->dcb->type != OUTPUT_TMDS && 285 (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
273 nvenc->dcb->type != OUTPUT_LVDS && 286 nvenc->dcb->type != DCB_OUTPUT_LVDS &&
274 nvenc->dcb->type != OUTPUT_DP) || 287 nvenc->dcb->type != DCB_OUTPUT_DP) ||
275 nvenc->dcb->or != nv_encoder->dcb->or) 288 nvenc->dcb->or != nv_encoder->dcb->or)
276 continue; 289 continue;
277 290
@@ -280,30 +293,30 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
280 } 293 }
281 294
282 /* wait for it to be done */ 295 /* wait for it to be done */
283 if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), 296 if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
284 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { 297 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
285 NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or); 298 NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
286 NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or, 299 NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
287 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or))); 300 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
288 } 301 }
289 302
290 val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)); 303 val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
291 304
292 if (mode == DRM_MODE_DPMS_ON) 305 if (mode == DRM_MODE_DPMS_ON)
293 val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON; 306 val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
294 else 307 else
295 val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON; 308 val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
296 309
297 nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val | 310 nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
298 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING); 311 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
299 if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or), 312 if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
300 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { 313 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
301 NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or); 314 NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
302 NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or, 315 NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
303 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or))); 316 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
304 } 317 }
305 318
306 if (nv_encoder->dcb->type == OUTPUT_DP) { 319 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
307 struct dp_train_func func = { 320 struct dp_train_func func = {
308 .link_set = nv50_sor_dp_link_set, 321 .link_set = nv50_sor_dp_link_set,
309 .train_set = nv50_sor_dp_train_set, 322 .train_set = nv50_sor_dp_train_set,
@@ -317,13 +330,15 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
317static void 330static void
318nv50_sor_save(struct drm_encoder *encoder) 331nv50_sor_save(struct drm_encoder *encoder)
319{ 332{
320 NV_ERROR(encoder->dev, "!!\n"); 333 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
334 NV_ERROR(drm, "!!\n");
321} 335}
322 336
323static void 337static void
324nv50_sor_restore(struct drm_encoder *encoder) 338nv50_sor_restore(struct drm_encoder *encoder)
325{ 339{
326 NV_ERROR(encoder->dev, "!!\n"); 340 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
341 NV_ERROR(drm, "!!\n");
327} 342}
328 343
329static bool 344static bool
@@ -331,14 +346,15 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder,
331 const struct drm_display_mode *mode, 346 const struct drm_display_mode *mode,
332 struct drm_display_mode *adjusted_mode) 347 struct drm_display_mode *adjusted_mode)
333{ 348{
349 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
334 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 350 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
335 struct nouveau_connector *connector; 351 struct nouveau_connector *connector;
336 352
337 NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or); 353 NV_DEBUG(drm, "or %d\n", nv_encoder->or);
338 354
339 connector = nouveau_encoder_connector_get(nv_encoder); 355 connector = nouveau_encoder_connector_get(nv_encoder);
340 if (!connector) { 356 if (!connector) {
341 NV_ERROR(encoder->dev, "Encoder has no connector\n"); 357 NV_ERROR(drm, "Encoder has no connector\n");
342 return false; 358 return false;
343 } 359 }
344 360
@@ -354,7 +370,7 @@ nv50_sor_prepare(struct drm_encoder *encoder)
354{ 370{
355 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 371 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
356 nv50_sor_disconnect(encoder); 372 nv50_sor_disconnect(encoder);
357 if (nv_encoder->dcb->type == OUTPUT_DP) { 373 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
358 /* avoid race between link training and supervisor intr */ 374 /* avoid race between link training and supervisor intr */
359 nv50_display_sync(encoder->dev); 375 nv50_display_sync(encoder->dev);
360 } 376 }
@@ -371,18 +387,18 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
371{ 387{
372 struct nouveau_channel *evo = nv50_display(encoder->dev)->master; 388 struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
373 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 389 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
374 struct drm_device *dev = encoder->dev; 390 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
375 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); 391 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
376 struct nouveau_connector *nv_connector; 392 struct nouveau_connector *nv_connector;
377 uint32_t mode_ctl = 0; 393 uint32_t mode_ctl = 0;
378 int ret; 394 int ret;
379 395
380 NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n", 396 NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
381 nv_encoder->or, nv_encoder->dcb->type, crtc->index); 397 nv_encoder->or, nv_encoder->dcb->type, crtc->index);
382 nv_encoder->crtc = encoder->crtc; 398 nv_encoder->crtc = encoder->crtc;
383 399
384 switch (nv_encoder->dcb->type) { 400 switch (nv_encoder->dcb->type) {
385 case OUTPUT_TMDS: 401 case DCB_OUTPUT_TMDS:
386 if (nv_encoder->dcb->sorconf.link & 1) { 402 if (nv_encoder->dcb->sorconf.link & 1) {
387 if (mode->clock < 165000) 403 if (mode->clock < 165000)
388 mode_ctl = 0x0100; 404 mode_ctl = 0x0100;
@@ -393,7 +409,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
393 409
394 nouveau_hdmi_mode_set(encoder, mode); 410 nouveau_hdmi_mode_set(encoder, mode);
395 break; 411 break;
396 case OUTPUT_DP: 412 case DCB_OUTPUT_DP:
397 nv_connector = nouveau_encoder_connector_get(nv_encoder); 413 nv_connector = nouveau_encoder_connector_get(nv_encoder);
398 if (nv_connector && nv_connector->base.display_info.bpc == 6) { 414 if (nv_connector && nv_connector->base.display_info.bpc == 6) {
399 nv_encoder->dp.datarate = mode->clock * 18 / 8; 415 nv_encoder->dp.datarate = mode->clock * 18 / 8;
@@ -427,7 +443,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
427 443
428 ret = RING_SPACE(evo, 2); 444 ret = RING_SPACE(evo, 2);
429 if (ret) { 445 if (ret) {
430 NV_ERROR(dev, "no space while connecting SOR\n"); 446 NV_ERROR(drm, "no space while connecting SOR\n");
431 nv_encoder->crtc = NULL; 447 nv_encoder->crtc = NULL;
432 return; 448 return;
433 } 449 }
@@ -458,11 +474,9 @@ static void
458nv50_sor_destroy(struct drm_encoder *encoder) 474nv50_sor_destroy(struct drm_encoder *encoder)
459{ 475{
460 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 476 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
477 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
461 478
462 if (!encoder) 479 NV_DEBUG(drm, "\n");
463 return;
464
465 NV_DEBUG_KMS(encoder->dev, "\n");
466 480
467 drm_encoder_cleanup(encoder); 481 drm_encoder_cleanup(encoder);
468 482
@@ -474,21 +488,22 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
474}; 488};
475 489
476int 490int
477nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry) 491nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
478{ 492{
479 struct nouveau_encoder *nv_encoder = NULL; 493 struct nouveau_encoder *nv_encoder = NULL;
480 struct drm_device *dev = connector->dev; 494 struct drm_device *dev = connector->dev;
495 struct nouveau_drm *drm = nouveau_drm(dev);
481 struct drm_encoder *encoder; 496 struct drm_encoder *encoder;
482 int type; 497 int type;
483 498
484 NV_DEBUG_KMS(dev, "\n"); 499 NV_DEBUG(drm, "\n");
485 500
486 switch (entry->type) { 501 switch (entry->type) {
487 case OUTPUT_TMDS: 502 case DCB_OUTPUT_TMDS:
488 case OUTPUT_DP: 503 case DCB_OUTPUT_DP:
489 type = DRM_MODE_ENCODER_TMDS; 504 type = DRM_MODE_ENCODER_TMDS;
490 break; 505 break;
491 case OUTPUT_LVDS: 506 case DCB_OUTPUT_LVDS:
492 type = DRM_MODE_ENCODER_LVDS; 507 type = DRM_MODE_ENCODER_LVDS;
493 break; 508 break;
494 default: 509 default:
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
deleted file mode 100644
index 9ed9ae397d75..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ /dev/null
@@ -1,237 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28
29static int types[0x80] = {
30 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
31 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
32 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
33 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
34 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
35 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
36 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
37 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
38};
39
40bool
41nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
42{
43 int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
44
45 if (likely(type < ARRAY_SIZE(types) && types[type]))
46 return true;
47 return false;
48}
49
50void
51nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
52{
53 struct drm_nouveau_private *dev_priv = dev->dev_private;
54 struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
55 struct nouveau_mm_node *this;
56 struct nouveau_mem *mem;
57
58 mem = *pmem;
59 *pmem = NULL;
60 if (unlikely(mem == NULL))
61 return;
62
63 mutex_lock(&mm->mutex);
64 while (!list_empty(&mem->regions)) {
65 this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
66
67 list_del(&this->rl_entry);
68 nouveau_mm_put(mm, this);
69 }
70
71 if (mem->tag) {
72 drm_mm_put_block(mem->tag);
73 mem->tag = NULL;
74 }
75 mutex_unlock(&mm->mutex);
76
77 kfree(mem);
78}
79
80int
81nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
82 u32 memtype, struct nouveau_mem **pmem)
83{
84 struct drm_nouveau_private *dev_priv = dev->dev_private;
85 struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
86 struct nouveau_mm_node *r;
87 struct nouveau_mem *mem;
88 int comp = (memtype & 0x300) >> 8;
89 int type = (memtype & 0x07f);
90 int ret;
91
92 if (!types[type])
93 return -EINVAL;
94 size >>= 12;
95 align >>= 12;
96 size_nc >>= 12;
97
98 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
99 if (!mem)
100 return -ENOMEM;
101
102 mutex_lock(&mm->mutex);
103 if (comp) {
104 if (align == 16) {
105 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
106 int n = (size >> 4) * comp;
107
108 mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
109 if (mem->tag)
110 mem->tag = drm_mm_get_block(mem->tag, n, 0);
111 }
112
113 if (unlikely(!mem->tag))
114 comp = 0;
115 }
116
117 INIT_LIST_HEAD(&mem->regions);
118 mem->dev = dev_priv->dev;
119 mem->memtype = (comp << 7) | type;
120 mem->size = size;
121
122 do {
123 ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
124 if (ret) {
125 mutex_unlock(&mm->mutex);
126 nv50_vram_del(dev, &mem);
127 return ret;
128 }
129
130 list_add_tail(&r->rl_entry, &mem->regions);
131 size -= r->length;
132 } while (size);
133 mutex_unlock(&mm->mutex);
134
135 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
136 mem->offset = (u64)r->offset << 12;
137 *pmem = mem;
138 return 0;
139}
140
141static u32
142nv50_vram_rblock(struct drm_device *dev)
143{
144 struct drm_nouveau_private *dev_priv = dev->dev_private;
145 int i, parts, colbits, rowbitsa, rowbitsb, banks;
146 u64 rowsize, predicted;
147 u32 r0, r4, rt, ru, rblock_size;
148
149 r0 = nv_rd32(dev, 0x100200);
150 r4 = nv_rd32(dev, 0x100204);
151 rt = nv_rd32(dev, 0x100250);
152 ru = nv_rd32(dev, 0x001540);
153 NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
154
155 for (i = 0, parts = 0; i < 8; i++) {
156 if (ru & (0x00010000 << i))
157 parts++;
158 }
159
160 colbits = (r4 & 0x0000f000) >> 12;
161 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
162 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
163 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
164
165 rowsize = parts * banks * (1 << colbits) * 8;
166 predicted = rowsize << rowbitsa;
167 if (r0 & 0x00000004)
168 predicted += rowsize << rowbitsb;
169
170 if (predicted != dev_priv->vram_size) {
171 NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
172 (u32)(dev_priv->vram_size >> 20));
173 NV_WARN(dev, "we calculated %dMiB VRAM\n",
174 (u32)(predicted >> 20));
175 }
176
177 rblock_size = rowsize;
178 if (rt & 1)
179 rblock_size *= 3;
180
181 NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
182 return rblock_size;
183}
184
185int
186nv50_vram_init(struct drm_device *dev)
187{
188 struct drm_nouveau_private *dev_priv = dev->dev_private;
189 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
190 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
191 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
192 u32 pfb714 = nv_rd32(dev, 0x100714);
193 u32 rblock, length;
194
195 switch (pfb714 & 0x00000007) {
196 case 0: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
197 case 1:
198 if (nouveau_mem_vbios_type(dev) == NV_MEM_TYPE_DDR3)
199 dev_priv->vram_type = NV_MEM_TYPE_DDR3;
200 else
201 dev_priv->vram_type = NV_MEM_TYPE_DDR2;
202 break;
203 case 2: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
204 case 3: dev_priv->vram_type = NV_MEM_TYPE_GDDR4; break;
205 case 4: dev_priv->vram_type = NV_MEM_TYPE_GDDR5; break;
206 default:
207 break;
208 }
209
210 dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x100200) & 0x4);
211 dev_priv->vram_size = nv_rd32(dev, 0x10020c);
212 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
213 dev_priv->vram_size &= 0xffffffff00ULL;
214
215 /* IGPs, no funky reordering happens here, they don't have VRAM */
216 if (dev_priv->chipset == 0xaa ||
217 dev_priv->chipset == 0xac ||
218 dev_priv->chipset == 0xaf) {
219 dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
220 rblock = 4096 >> 12;
221 } else {
222 rblock = nv50_vram_rblock(dev) >> 12;
223 }
224
225 length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
226
227 return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
228}
229
230void
231nv50_vram_fini(struct drm_device *dev)
232{
233 struct drm_nouveau_private *dev_priv = dev->dev_private;
234 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
235
236 nouveau_mm_fini(&vram->mm);
237}
diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
deleted file mode 100644
index 74875739bcc0..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_bsp.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30
31/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
32 * more than just an enable/disable stub this needs to be split out to
33 * nv98_bsp.c...
34 */
35
36struct nv84_bsp_engine {
37 struct nouveau_exec_engine base;
38};
39
40static int
41nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
42{
43 if (!(nv_rd32(dev, 0x000200) & 0x00008000))
44 return 0;
45
46 nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
47 return 0;
48}
49
50static int
51nv84_bsp_init(struct drm_device *dev, int engine)
52{
53 nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
54 nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
55 return 0;
56}
57
58static void
59nv84_bsp_destroy(struct drm_device *dev, int engine)
60{
61 struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
62
63 NVOBJ_ENGINE_DEL(dev, BSP);
64
65 kfree(pbsp);
66}
67
68int
69nv84_bsp_create(struct drm_device *dev)
70{
71 struct nv84_bsp_engine *pbsp;
72
73 pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
74 if (!pbsp)
75 return -ENOMEM;
76
77 pbsp->base.destroy = nv84_bsp_destroy;
78 pbsp->base.init = nv84_bsp_init;
79 pbsp->base.fini = nv84_bsp_fini;
80
81 NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
82 return 0;
83}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
deleted file mode 100644
index bbfcc73b6708..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ /dev/null
@@ -1,205 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30
31struct nv84_crypt_engine {
32 struct nouveau_exec_engine base;
33};
34
35static int
36nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
37{
38 struct drm_device *dev = chan->dev;
39 struct drm_nouveau_private *dev_priv = dev->dev_private;
40 struct nouveau_gpuobj *ramin = chan->ramin;
41 struct nouveau_gpuobj *ctx;
42 int ret;
43
44 NV_DEBUG(dev, "ch%d\n", chan->id);
45
46 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
47 NVOBJ_FLAG_ZERO_FREE, &ctx);
48 if (ret)
49 return ret;
50
51 nv_wo32(ramin, 0xa0, 0x00190000);
52 nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1);
53 nv_wo32(ramin, 0xa8, ctx->vinst);
54 nv_wo32(ramin, 0xac, 0);
55 nv_wo32(ramin, 0xb0, 0);
56 nv_wo32(ramin, 0xb4, 0);
57 dev_priv->engine.instmem.flush(dev);
58
59 atomic_inc(&chan->vm->engref[engine]);
60 chan->engctx[engine] = ctx;
61 return 0;
62}
63
64static void
65nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
66{
67 struct nouveau_gpuobj *ctx = chan->engctx[engine];
68 struct drm_device *dev = chan->dev;
69 u32 inst;
70
71 inst = (chan->ramin->vinst >> 12);
72 inst |= 0x80000000;
73
74 /* mark context as invalid if still on the hardware, not
75 * doing this causes issues the next time PCRYPT is used,
76 * unsurprisingly :)
77 */
78 nv_wr32(dev, 0x10200c, 0x00000000);
79 if (nv_rd32(dev, 0x102188) == inst)
80 nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
81 if (nv_rd32(dev, 0x10218c) == inst)
82 nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
83 nv_wr32(dev, 0x10200c, 0x00000010);
84
85 nouveau_gpuobj_ref(NULL, &ctx);
86
87 atomic_dec(&chan->vm->engref[engine]);
88 chan->engctx[engine] = NULL;
89}
90
91static int
92nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
93 u32 handle, u16 class)
94{
95 struct drm_device *dev = chan->dev;
96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_gpuobj *obj = NULL;
98 int ret;
99
100 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
101 if (ret)
102 return ret;
103 obj->engine = 5;
104 obj->class = class;
105
106 nv_wo32(obj, 0x00, class);
107 dev_priv->engine.instmem.flush(dev);
108
109 ret = nouveau_ramht_insert(chan, handle, obj);
110 nouveau_gpuobj_ref(NULL, &obj);
111 return ret;
112}
113
114static void
115nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
116{
117 nv50_vm_flush_engine(dev, 0x0a);
118}
119
120static struct nouveau_bitfield nv84_crypt_intr[] = {
121 { 0x00000001, "INVALID_STATE" },
122 { 0x00000002, "ILLEGAL_MTHD" },
123 { 0x00000004, "ILLEGAL_CLASS" },
124 { 0x00000080, "QUERY" },
125 { 0x00000100, "FAULT" },
126 {}
127};
128
129static void
130nv84_crypt_isr(struct drm_device *dev)
131{
132 u32 stat = nv_rd32(dev, 0x102130);
133 u32 mthd = nv_rd32(dev, 0x102190);
134 u32 data = nv_rd32(dev, 0x102194);
135 u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12;
136 int show = nouveau_ratelimit();
137 int chid = nv50_graph_isr_chid(dev, inst);
138
139 if (show) {
140 NV_INFO(dev, "PCRYPT:");
141 nouveau_bitfield_print(nv84_crypt_intr, stat);
142 printk(KERN_CONT " ch %d (0x%010llx) mthd 0x%04x data 0x%08x\n",
143 chid, inst, mthd, data);
144 }
145
146 nv_wr32(dev, 0x102130, stat);
147 nv_wr32(dev, 0x10200c, 0x10);
148
149 nv50_fb_vm_trap(dev, show);
150}
151
152static int
153nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
154{
155 nv_wr32(dev, 0x102140, 0x00000000);
156 return 0;
157}
158
159static int
160nv84_crypt_init(struct drm_device *dev, int engine)
161{
162 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
163 nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
164
165 nv_wr32(dev, 0x102130, 0xffffffff);
166 nv_wr32(dev, 0x102140, 0xffffffbf);
167
168 nv_wr32(dev, 0x10200c, 0x00000010);
169 return 0;
170}
171
172static void
173nv84_crypt_destroy(struct drm_device *dev, int engine)
174{
175 struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine);
176
177 NVOBJ_ENGINE_DEL(dev, CRYPT);
178
179 nouveau_irq_unregister(dev, 14);
180 kfree(pcrypt);
181}
182
183int
184nv84_crypt_create(struct drm_device *dev)
185{
186 struct nv84_crypt_engine *pcrypt;
187
188 pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
189 if (!pcrypt)
190 return -ENOMEM;
191
192 pcrypt->base.destroy = nv84_crypt_destroy;
193 pcrypt->base.init = nv84_crypt_init;
194 pcrypt->base.fini = nv84_crypt_fini;
195 pcrypt->base.context_new = nv84_crypt_context_new;
196 pcrypt->base.context_del = nv84_crypt_context_del;
197 pcrypt->base.object_new = nv84_crypt_object_new;
198 pcrypt->base.tlb_flush = nv84_crypt_tlb_flush;
199
200 nouveau_irq_register(dev, 14, nv84_crypt_isr);
201
202 NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
203 NVOBJ_CLASS (dev, 0x74c1, CRYPT);
204 return 0;
205}
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index c2f889b0d340..c686650584b6 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -22,13 +22,17 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/object.h>
26#include "nouveau_drv.h" 26#include <core/class.h>
27
28#include <engine/fifo.h>
29
30#include "nouveau_drm.h"
27#include "nouveau_dma.h" 31#include "nouveau_dma.h"
28#include "nouveau_fifo.h"
29#include "nouveau_ramht.h"
30#include "nouveau_fence.h" 32#include "nouveau_fence.h"
31 33
34#include "nv50_display.h"
35
32struct nv84_fence_chan { 36struct nv84_fence_chan {
33 struct nouveau_fence_chan base; 37 struct nouveau_fence_chan base;
34}; 38};
@@ -42,13 +46,14 @@ static int
42nv84_fence_emit(struct nouveau_fence *fence) 46nv84_fence_emit(struct nouveau_fence *fence)
43{ 47{
44 struct nouveau_channel *chan = fence->channel; 48 struct nouveau_channel *chan = fence->channel;
49 struct nouveau_fifo_chan *fifo = (void *)chan->object;
45 int ret = RING_SPACE(chan, 7); 50 int ret = RING_SPACE(chan, 7);
46 if (ret == 0) { 51 if (ret == 0) {
47 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 52 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
48 OUT_RING (chan, NvSema); 53 OUT_RING (chan, NvSema);
49 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 54 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
50 OUT_RING (chan, upper_32_bits(chan->id * 16)); 55 OUT_RING (chan, upper_32_bits(fifo->chid * 16));
51 OUT_RING (chan, lower_32_bits(chan->id * 16)); 56 OUT_RING (chan, lower_32_bits(fifo->chid * 16));
52 OUT_RING (chan, fence->sequence); 57 OUT_RING (chan, fence->sequence);
53 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG); 58 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
54 FIRE_RING (chan); 59 FIRE_RING (chan);
@@ -61,13 +66,14 @@ static int
61nv84_fence_sync(struct nouveau_fence *fence, 66nv84_fence_sync(struct nouveau_fence *fence,
62 struct nouveau_channel *prev, struct nouveau_channel *chan) 67 struct nouveau_channel *prev, struct nouveau_channel *chan)
63{ 68{
69 struct nouveau_fifo_chan *fifo = (void *)prev->object;
64 int ret = RING_SPACE(chan, 7); 70 int ret = RING_SPACE(chan, 7);
65 if (ret == 0) { 71 if (ret == 0) {
66 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 72 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
67 OUT_RING (chan, NvSema); 73 OUT_RING (chan, NvSema);
68 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 74 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
69 OUT_RING (chan, upper_32_bits(prev->id * 16)); 75 OUT_RING (chan, upper_32_bits(fifo->chid * 16));
70 OUT_RING (chan, lower_32_bits(prev->id * 16)); 76 OUT_RING (chan, lower_32_bits(fifo->chid * 16));
71 OUT_RING (chan, fence->sequence); 77 OUT_RING (chan, fence->sequence);
72 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL); 78 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
73 FIRE_RING (chan); 79 FIRE_RING (chan);
@@ -78,100 +84,99 @@ nv84_fence_sync(struct nouveau_fence *fence,
78static u32 84static u32
79nv84_fence_read(struct nouveau_channel *chan) 85nv84_fence_read(struct nouveau_channel *chan)
80{ 86{
81 struct nv84_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE); 87 struct nouveau_fifo_chan *fifo = (void *)chan->object;
82 return nv_ro32(priv->mem, chan->id * 16); 88 struct nv84_fence_priv *priv = chan->drm->fence;
89 return nv_ro32(priv->mem, fifo->chid * 16);
83} 90}
84 91
85static void 92static void
86nv84_fence_context_del(struct nouveau_channel *chan, int engine) 93nv84_fence_context_del(struct nouveau_channel *chan)
87{ 94{
88 struct nv84_fence_chan *fctx = chan->engctx[engine]; 95 struct nv84_fence_chan *fctx = chan->fence;
89 nouveau_fence_context_del(&fctx->base); 96 nouveau_fence_context_del(&fctx->base);
90 chan->engctx[engine] = NULL; 97 chan->fence = NULL;
91 kfree(fctx); 98 kfree(fctx);
92} 99}
93 100
94static int 101static int
95nv84_fence_context_new(struct nouveau_channel *chan, int engine) 102nv84_fence_context_new(struct nouveau_channel *chan)
96{ 103{
97 struct nv84_fence_priv *priv = nv_engine(chan->dev, engine); 104 struct drm_device *dev = chan->drm->dev;
105 struct nouveau_fifo_chan *fifo = (void *)chan->object;
106 struct nv84_fence_priv *priv = chan->drm->fence;
98 struct nv84_fence_chan *fctx; 107 struct nv84_fence_chan *fctx;
99 struct nouveau_gpuobj *obj; 108 struct nouveau_object *object;
100 int ret; 109 int ret, i;
101 110
102 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); 111 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
103 if (!fctx) 112 if (!fctx)
104 return -ENOMEM; 113 return -ENOMEM;
105 114
106 nouveau_fence_context_new(&fctx->base); 115 nouveau_fence_context_new(&fctx->base);
107 116
108 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY, 117 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
109 priv->mem->vinst, priv->mem->size, 118 NvSema, 0x0002,
110 NV_MEM_ACCESS_RW, 119 &(struct nv_dma_class) {
111 NV_MEM_TARGET_VRAM, &obj); 120 .flags = NV_DMA_TARGET_VRAM |
112 if (ret == 0) { 121 NV_DMA_ACCESS_RDWR,
113 ret = nouveau_ramht_insert(chan, NvSema, obj); 122 .start = priv->mem->addr,
114 nouveau_gpuobj_ref(NULL, &obj); 123 .limit = priv->mem->addr +
115 nv_wo32(priv->mem, chan->id * 16, 0x00000000); 124 priv->mem->size - 1,
125 }, sizeof(struct nv_dma_class),
126 &object);
127
128 /* dma objects for display sync channel semaphore blocks */
129 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
130 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
131
132 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
133 NvEvoSema0 + i, 0x003d,
134 &(struct nv_dma_class) {
135 .flags = NV_DMA_TARGET_VRAM |
136 NV_DMA_ACCESS_RDWR,
137 .start = bo->bo.offset,
138 .limit = bo->bo.offset + 0xfff,
139 }, sizeof(struct nv_dma_class),
140 &object);
116 } 141 }
117 142
118 if (ret) 143 if (ret)
119 nv84_fence_context_del(chan, engine); 144 nv84_fence_context_del(chan);
145 nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
120 return ret; 146 return ret;
121} 147}
122 148
123static int
124nv84_fence_fini(struct drm_device *dev, int engine, bool suspend)
125{
126 return 0;
127}
128
129static int
130nv84_fence_init(struct drm_device *dev, int engine)
131{
132 return 0;
133}
134
135static void 149static void
136nv84_fence_destroy(struct drm_device *dev, int engine) 150nv84_fence_destroy(struct nouveau_drm *drm)
137{ 151{
138 struct drm_nouveau_private *dev_priv = dev->dev_private; 152 struct nv84_fence_priv *priv = drm->fence;
139 struct nv84_fence_priv *priv = nv_engine(dev, engine);
140
141 nouveau_gpuobj_ref(NULL, &priv->mem); 153 nouveau_gpuobj_ref(NULL, &priv->mem);
142 dev_priv->eng[engine] = NULL; 154 drm->fence = NULL;
143 kfree(priv); 155 kfree(priv);
144} 156}
145 157
146int 158int
147nv84_fence_create(struct drm_device *dev) 159nv84_fence_create(struct nouveau_drm *drm)
148{ 160{
149 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); 161 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
150 struct drm_nouveau_private *dev_priv = dev->dev_private;
151 struct nv84_fence_priv *priv; 162 struct nv84_fence_priv *priv;
163 u32 chan = pfifo->max + 1;
152 int ret; 164 int ret;
153 165
154 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 166 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
155 if (!priv) 167 if (!priv)
156 return -ENOMEM; 168 return -ENOMEM;
157 169
158 priv->base.engine.destroy = nv84_fence_destroy; 170 priv->base.dtor = nv84_fence_destroy;
159 priv->base.engine.init = nv84_fence_init; 171 priv->base.context_new = nv84_fence_context_new;
160 priv->base.engine.fini = nv84_fence_fini; 172 priv->base.context_del = nv84_fence_context_del;
161 priv->base.engine.context_new = nv84_fence_context_new;
162 priv->base.engine.context_del = nv84_fence_context_del;
163 priv->base.emit = nv84_fence_emit; 173 priv->base.emit = nv84_fence_emit;
164 priv->base.sync = nv84_fence_sync; 174 priv->base.sync = nv84_fence_sync;
165 priv->base.read = nv84_fence_read; 175 priv->base.read = nv84_fence_read;
166 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
167
168 ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
169 0x1000, 0, &priv->mem);
170 if (ret)
171 goto out;
172 176
173out: 177 ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
178 &priv->mem);
174 if (ret) 179 if (ret)
175 nv84_fence_destroy(dev, NVOBJ_ENGINE_FENCE); 180 nv84_fence_destroy(drm);
176 return ret; 181 return ret;
177} 182}
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
deleted file mode 100644
index c564c5e4c30a..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_fifo.c
+++ /dev/null
@@ -1,250 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_fifo.h"
31#include "nouveau_ramht.h"
32#include "nouveau_vm.h"
33
34struct nv84_fifo_priv {
35 struct nouveau_fifo_priv base;
36 struct nouveau_gpuobj *playlist[2];
37 int cur_playlist;
38};
39
40struct nv84_fifo_chan {
41 struct nouveau_fifo_chan base;
42 struct nouveau_gpuobj *ramfc;
43 struct nouveau_gpuobj *cache;
44};
45
46static int
47nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
48{
49 struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
50 struct nv84_fifo_chan *fctx;
51 struct drm_device *dev = chan->dev;
52 struct drm_nouveau_private *dev_priv = dev->dev_private;
53 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
54 u64 instance;
55 unsigned long flags;
56 int ret;
57
58 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
59 if (!fctx)
60 return -ENOMEM;
61 atomic_inc(&chan->vm->engref[engine]);
62
63 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
64 NV50_USER(chan->id), PAGE_SIZE);
65 if (!chan->user) {
66 ret = -ENOMEM;
67 goto error;
68 }
69
70 ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
71 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
72 if (ret)
73 goto error;
74
75 instance = fctx->ramfc->vinst >> 8;
76
77 ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
78 if (ret)
79 goto error;
80
81 nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
82 nv_wo32(fctx->ramfc, 0x40, 0x00000000);
83 nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
84 nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
85 nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
86 nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
87 drm_order(chan->dma.ib_max + 1) << 16);
88 nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
89 nv_wo32(fctx->ramfc, 0x78, 0x00000000);
90 nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
91 nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
92 (4 << 24) /* SEARCH_FULL */ |
93 (chan->ramht->gpuobj->cinst >> 4));
94 nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
95 nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
96
97 nv_wo32(chan->ramin, 0x00, chan->id);
98 nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
99
100 dev_priv->engine.instmem.flush(dev);
101
102 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
103 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
104 nv50_fifo_playlist_update(dev);
105 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
106
107error:
108 if (ret)
109 priv->base.base.context_del(chan, engine);
110 return ret;
111}
112
113static void
114nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
115{
116 struct nv84_fifo_chan *fctx = chan->engctx[engine];
117 struct drm_device *dev = chan->dev;
118 struct drm_nouveau_private *dev_priv = dev->dev_private;
119 unsigned long flags;
120 u32 save;
121
122 /* remove channel from playlist, will context switch if active */
123 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
124 nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
125 nv50_fifo_playlist_update(dev);
126
127 save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
128
129 /* tell any engines on this channel to unload their contexts */
130 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
131 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
132 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
133
134 nv_wr32(dev, 0x002520, save);
135
136 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
137 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
138
139 /* clean up */
140 if (chan->user) {
141 iounmap(chan->user);
142 chan->user = NULL;
143 }
144
145 nouveau_gpuobj_ref(NULL, &fctx->ramfc);
146 nouveau_gpuobj_ref(NULL, &fctx->cache);
147
148 atomic_dec(&chan->vm->engref[engine]);
149 chan->engctx[engine] = NULL;
150 kfree(fctx);
151}
152
153static int
154nv84_fifo_init(struct drm_device *dev, int engine)
155{
156 struct drm_nouveau_private *dev_priv = dev->dev_private;
157 struct nv84_fifo_chan *fctx;
158 u32 instance;
159 int i;
160
161 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
162 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
163 nv_wr32(dev, 0x00250c, 0x6f3cfc34);
164 nv_wr32(dev, 0x002044, 0x01003fff);
165
166 nv_wr32(dev, 0x002100, 0xffffffff);
167 nv_wr32(dev, 0x002140, 0xffffffff);
168
169 for (i = 0; i < 128; i++) {
170 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
171 if (chan && (fctx = chan->engctx[engine]))
172 instance = 0x80000000 | fctx->ramfc->vinst >> 8;
173 else
174 instance = 0x00000000;
175 nv_wr32(dev, 0x002600 + (i * 4), instance);
176 }
177
178 nv50_fifo_playlist_update(dev);
179
180 nv_wr32(dev, 0x003200, 1);
181 nv_wr32(dev, 0x003250, 1);
182 nv_wr32(dev, 0x002500, 1);
183 return 0;
184}
185
186static int
187nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
188{
189 struct drm_nouveau_private *dev_priv = dev->dev_private;
190 struct nv84_fifo_priv *priv = nv_engine(dev, engine);
191 int i;
192 u32 save;
193
194 /* set playlist length to zero, fifo will unload context */
195 nv_wr32(dev, 0x0032ec, 0);
196
197 save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
198
199 /* tell all connected engines to unload their contexts */
200 for (i = 0; i < priv->base.channels; i++) {
201 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
202 if (chan)
203 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
204 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
205 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
206 return -EBUSY;
207 }
208 }
209
210 nv_wr32(dev, 0x002520, save);
211 nv_wr32(dev, 0x002140, 0);
212 return 0;
213}
214
215int
216nv84_fifo_create(struct drm_device *dev)
217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 struct nv84_fifo_priv *priv;
220 int ret;
221
222 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
223 if (!priv)
224 return -ENOMEM;
225
226 priv->base.base.destroy = nv50_fifo_destroy;
227 priv->base.base.init = nv84_fifo_init;
228 priv->base.base.fini = nv84_fifo_fini;
229 priv->base.base.context_new = nv84_fifo_context_new;
230 priv->base.base.context_del = nv84_fifo_context_del;
231 priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
232 priv->base.channels = 127;
233 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
234
235 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
236 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
237 if (ret)
238 goto error;
239
240 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
241 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
242 if (ret)
243 goto error;
244
245 nouveau_irq_register(dev, 8, nv04_fifo_isr);
246error:
247 if (ret)
248 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
249 return ret;
250}
diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
deleted file mode 100644
index 6570d300ab85..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_vp.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30
31/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
32 * more than just an enable/disable stub this needs to be split out to
33 * nv98_vp.c...
34 */
35
36struct nv84_vp_engine {
37 struct nouveau_exec_engine base;
38};
39
40static int
41nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
42{
43 if (!(nv_rd32(dev, 0x000200) & 0x00020000))
44 return 0;
45
46 nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
47 return 0;
48}
49
50static int
51nv84_vp_init(struct drm_device *dev, int engine)
52{
53 nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
54 nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
55 return 0;
56}
57
58static void
59nv84_vp_destroy(struct drm_device *dev, int engine)
60{
61 struct nv84_vp_engine *pvp = nv_engine(dev, engine);
62
63 NVOBJ_ENGINE_DEL(dev, VP);
64
65 kfree(pvp);
66}
67
68int
69nv84_vp_create(struct drm_device *dev)
70{
71 struct nv84_vp_engine *pvp;
72
73 pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
74 if (!pvp)
75 return -ENOMEM;
76
77 pvp->base.destroy = nv84_vp_destroy;
78 pvp->base.init = nv84_vp_init;
79 pvp->base.fini = nv84_vp_fini;
80
81 NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
82 return 0;
83}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
deleted file mode 100644
index e25e13fb894e..000000000000
--- a/drivers/gpu/drm/nouveau/nv98_crypt.c
+++ /dev/null
@@ -1,216 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_util.h"
29#include "nouveau_vm.h"
30#include "nouveau_ramht.h"
31
32#include "nv98_crypt.fuc.h"
33
34struct nv98_crypt_priv {
35 struct nouveau_exec_engine base;
36};
37
38struct nv98_crypt_chan {
39 struct nouveau_gpuobj *mem;
40};
41
42static int
43nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
44{
45 struct drm_device *dev = chan->dev;
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nv98_crypt_priv *priv = nv_engine(dev, engine);
48 struct nv98_crypt_chan *cctx;
49 int ret;
50
51 cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
52 if (!cctx)
53 return -ENOMEM;
54
55 atomic_inc(&chan->vm->engref[engine]);
56
57 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
58 NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
59 if (ret)
60 goto error;
61
62 nv_wo32(chan->ramin, 0xa0, 0x00190000);
63 nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
64 nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
65 nv_wo32(chan->ramin, 0xac, 0x00000000);
66 nv_wo32(chan->ramin, 0xb0, 0x00000000);
67 nv_wo32(chan->ramin, 0xb4, 0x00000000);
68 dev_priv->engine.instmem.flush(dev);
69
70error:
71 if (ret)
72 priv->base.context_del(chan, engine);
73 return ret;
74}
75
76static void
77nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
78{
79 struct nv98_crypt_chan *cctx = chan->engctx[engine];
80 int i;
81
82 for (i = 0xa0; i < 0xb4; i += 4)
83 nv_wo32(chan->ramin, i, 0x00000000);
84
85 nouveau_gpuobj_ref(NULL, &cctx->mem);
86
87 atomic_dec(&chan->vm->engref[engine]);
88 chan->engctx[engine] = NULL;
89 kfree(cctx);
90}
91
92static int
93nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
94 u32 handle, u16 class)
95{
96 struct nv98_crypt_chan *cctx = chan->engctx[engine];
97
98 /* fuc engine doesn't need an object, our ramht code does.. */
99 cctx->mem->engine = 5;
100 cctx->mem->class = class;
101 return nouveau_ramht_insert(chan, handle, cctx->mem);
102}
103
104static void
105nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
106{
107 nv50_vm_flush_engine(dev, 0x0a);
108}
109
110static int
111nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
112{
113 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
114 return 0;
115}
116
117static int
118nv98_crypt_init(struct drm_device *dev, int engine)
119{
120 int i;
121
122 /* reset! */
123 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
124 nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
125
126 /* wait for exit interrupt to signal */
127 nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
128 nv_wr32(dev, 0x087004, 0x00000010);
129
130 /* upload microcode code and data segments */
131 nv_wr32(dev, 0x087ff8, 0x00100000);
132 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
133 nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
134
135 nv_wr32(dev, 0x087ff8, 0x00000000);
136 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
137 nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
138
139 /* start it running */
140 nv_wr32(dev, 0x08710c, 0x00000000);
141 nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
142 nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
143 return 0;
144}
145
146static struct nouveau_enum nv98_crypt_isr_error_name[] = {
147 { 0x0000, "ILLEGAL_MTHD" },
148 { 0x0001, "INVALID_BITFIELD" },
149 { 0x0002, "INVALID_ENUM" },
150 { 0x0003, "QUERY" },
151 {}
152};
153
154static void
155nv98_crypt_isr(struct drm_device *dev)
156{
157 u32 disp = nv_rd32(dev, 0x08701c);
158 u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
159 u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
160 u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
161 u32 addr = nv_rd32(dev, 0x087040) >> 16;
162 u32 mthd = (addr & 0x07ff) << 2;
163 u32 subc = (addr & 0x3800) >> 11;
164 u32 data = nv_rd32(dev, 0x087044);
165 int chid = nv50_graph_isr_chid(dev, inst);
166
167 if (stat & 0x00000040) {
168 NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
169 nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
170 printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
171 chid, inst, subc, mthd, data);
172 nv_wr32(dev, 0x087004, 0x00000040);
173 stat &= ~0x00000040;
174 }
175
176 if (stat) {
177 NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
178 nv_wr32(dev, 0x087004, stat);
179 }
180
181 nv50_fb_vm_trap(dev, 1);
182}
183
184static void
185nv98_crypt_destroy(struct drm_device *dev, int engine)
186{
187 struct nv98_crypt_priv *priv = nv_engine(dev, engine);
188
189 nouveau_irq_unregister(dev, 14);
190 NVOBJ_ENGINE_DEL(dev, CRYPT);
191 kfree(priv);
192}
193
194int
195nv98_crypt_create(struct drm_device *dev)
196{
197 struct nv98_crypt_priv *priv;
198
199 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
200 if (!priv)
201 return -ENOMEM;
202
203 priv->base.destroy = nv98_crypt_destroy;
204 priv->base.init = nv98_crypt_init;
205 priv->base.fini = nv98_crypt_fini;
206 priv->base.context_new = nv98_crypt_context_new;
207 priv->base.context_del = nv98_crypt_context_del;
208 priv->base.object_new = nv98_crypt_object_new;
209 priv->base.tlb_flush = nv98_crypt_tlb_flush;
210
211 nouveau_irq_register(dev, 14, nv98_crypt_isr);
212
213 NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
214 NVOBJ_CLASS(dev, 0x88b4, CRYPT);
215 return 0;
216}
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
deleted file mode 100644
index 0387dc7f4f42..000000000000
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ /dev/null
@@ -1,203 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "nouveau_drv.h"
28#include "nouveau_util.h"
29#include "nouveau_vm.h"
30#include "nouveau_ramht.h"
31#include "nva3_copy.fuc.h"
32
33struct nva3_copy_engine {
34 struct nouveau_exec_engine base;
35};
36
37static int
38nva3_copy_context_new(struct nouveau_channel *chan, int engine)
39{
40 struct drm_device *dev = chan->dev;
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_gpuobj *ramin = chan->ramin;
43 struct nouveau_gpuobj *ctx = NULL;
44 int ret;
45
46 NV_DEBUG(dev, "ch%d\n", chan->id);
47
48 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
49 NVOBJ_FLAG_ZERO_FREE, &ctx);
50 if (ret)
51 return ret;
52
53 nv_wo32(ramin, 0xc0, 0x00190000);
54 nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
55 nv_wo32(ramin, 0xc8, ctx->vinst);
56 nv_wo32(ramin, 0xcc, 0x00000000);
57 nv_wo32(ramin, 0xd0, 0x00000000);
58 nv_wo32(ramin, 0xd4, 0x00000000);
59 dev_priv->engine.instmem.flush(dev);
60
61 atomic_inc(&chan->vm->engref[engine]);
62 chan->engctx[engine] = ctx;
63 return 0;
64}
65
66static int
67nva3_copy_object_new(struct nouveau_channel *chan, int engine,
68 u32 handle, u16 class)
69{
70 struct nouveau_gpuobj *ctx = chan->engctx[engine];
71
72 /* fuc engine doesn't need an object, our ramht code does.. */
73 ctx->engine = 3;
74 ctx->class = class;
75 return nouveau_ramht_insert(chan, handle, ctx);
76}
77
78static void
79nva3_copy_context_del(struct nouveau_channel *chan, int engine)
80{
81 struct nouveau_gpuobj *ctx = chan->engctx[engine];
82 int i;
83
84 for (i = 0xc0; i <= 0xd4; i += 4)
85 nv_wo32(chan->ramin, i, 0x00000000);
86
87 atomic_dec(&chan->vm->engref[engine]);
88 nouveau_gpuobj_ref(NULL, &ctx);
89 chan->engctx[engine] = ctx;
90}
91
92static void
93nva3_copy_tlb_flush(struct drm_device *dev, int engine)
94{
95 nv50_vm_flush_engine(dev, 0x0d);
96}
97
98static int
99nva3_copy_init(struct drm_device *dev, int engine)
100{
101 int i;
102
103 nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
104 nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
105 nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */
106
107 /* upload ucode */
108 nv_wr32(dev, 0x1041c0, 0x01000000);
109 for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
110 nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);
111
112 nv_wr32(dev, 0x104180, 0x01000000);
113 for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
114 if ((i & 0x3f) == 0)
115 nv_wr32(dev, 0x104188, i >> 6);
116 nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
117 }
118
119 /* start it running */
120 nv_wr32(dev, 0x10410c, 0x00000000);
121 nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
122 nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
123 return 0;
124}
125
126static int
127nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
128{
129 nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
130 nv_wr32(dev, 0x104014, 0xffffffff);
131 return 0;
132}
133
134static struct nouveau_enum nva3_copy_isr_error_name[] = {
135 { 0x0001, "ILLEGAL_MTHD" },
136 { 0x0002, "INVALID_ENUM" },
137 { 0x0003, "INVALID_BITFIELD" },
138 {}
139};
140
141static void
142nva3_copy_isr(struct drm_device *dev)
143{
144 u32 dispatch = nv_rd32(dev, 0x10401c);
145 u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
146 u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
147 u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
148 u32 addr = nv_rd32(dev, 0x104040) >> 16;
149 u32 mthd = (addr & 0x07ff) << 2;
150 u32 subc = (addr & 0x3800) >> 11;
151 u32 data = nv_rd32(dev, 0x104044);
152 int chid = nv50_graph_isr_chid(dev, inst);
153
154 if (stat & 0x00000040) {
155 NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
156 nouveau_enum_print(nva3_copy_isr_error_name, ssta);
157 printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
158 chid, inst, subc, mthd, data);
159 nv_wr32(dev, 0x104004, 0x00000040);
160 stat &= ~0x00000040;
161 }
162
163 if (stat) {
164 NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
165 nv_wr32(dev, 0x104004, stat);
166 }
167 nv50_fb_vm_trap(dev, 1);
168}
169
170static void
171nva3_copy_destroy(struct drm_device *dev, int engine)
172{
173 struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
174
175 nouveau_irq_unregister(dev, 22);
176
177 NVOBJ_ENGINE_DEL(dev, COPY0);
178 kfree(pcopy);
179}
180
181int
182nva3_copy_create(struct drm_device *dev)
183{
184 struct nva3_copy_engine *pcopy;
185
186 pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
187 if (!pcopy)
188 return -ENOMEM;
189
190 pcopy->base.destroy = nva3_copy_destroy;
191 pcopy->base.init = nva3_copy_init;
192 pcopy->base.fini = nva3_copy_fini;
193 pcopy->base.context_new = nva3_copy_context_new;
194 pcopy->base.context_del = nva3_copy_context_del;
195 pcopy->base.object_new = nva3_copy_object_new;
196 pcopy->base.tlb_flush = nva3_copy_tlb_flush;
197
198 nouveau_irq_register(dev, 22, nva3_copy_isr);
199
200 NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
201 NVOBJ_CLASS(dev, 0x85b5, COPY0);
202 return 0;
203}
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 798829353fb6..3f69e46436cf 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -23,17 +23,24 @@
23 */ 23 */
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "nouveau_drv.h" 26#include "nouveau_drm.h"
27#include "nouveau_bios.h" 27#include "nouveau_bios.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29 29
30#include <subdev/bios/pll.h>
31#include <subdev/bios.h>
32#include <subdev/clock.h>
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35
30static u32 read_clk(struct drm_device *, int, bool); 36static u32 read_clk(struct drm_device *, int, bool);
31static u32 read_pll(struct drm_device *, int, u32); 37static u32 read_pll(struct drm_device *, int, u32);
32 38
33static u32 39static u32
34read_vco(struct drm_device *dev, int clk) 40read_vco(struct drm_device *dev, int clk)
35{ 41{
36 u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4)); 42 struct nouveau_device *device = nouveau_dev(dev);
43 u32 sctl = nv_rd32(device, 0x4120 + (clk * 4));
37 if ((sctl & 0x00000030) != 0x00000030) 44 if ((sctl & 0x00000030) != 0x00000030)
38 return read_pll(dev, 0x41, 0x00e820); 45 return read_pll(dev, 0x41, 0x00e820);
39 return read_pll(dev, 0x42, 0x00e8a0); 46 return read_pll(dev, 0x42, 0x00e8a0);
@@ -42,26 +49,27 @@ read_vco(struct drm_device *dev, int clk)
42static u32 49static u32
43read_clk(struct drm_device *dev, int clk, bool ignore_en) 50read_clk(struct drm_device *dev, int clk, bool ignore_en)
44{ 51{
45 struct drm_nouveau_private *dev_priv = dev->dev_private; 52 struct nouveau_device *device = nouveau_dev(dev);
53 struct nouveau_drm *drm = nouveau_drm(dev);
46 u32 sctl, sdiv, sclk; 54 u32 sctl, sdiv, sclk;
47 55
48 /* refclk for the 0xe8xx plls is a fixed frequency */ 56 /* refclk for the 0xe8xx plls is a fixed frequency */
49 if (clk >= 0x40) { 57 if (clk >= 0x40) {
50 if (dev_priv->chipset == 0xaf) { 58 if (nv_device(drm->device)->chipset == 0xaf) {
51 /* no joke.. seriously.. sigh.. */ 59 /* no joke.. seriously.. sigh.. */
52 return nv_rd32(dev, 0x00471c) * 1000; 60 return nv_rd32(device, 0x00471c) * 1000;
53 } 61 }
54 62
55 return dev_priv->crystal; 63 return device->crystal;
56 } 64 }
57 65
58 sctl = nv_rd32(dev, 0x4120 + (clk * 4)); 66 sctl = nv_rd32(device, 0x4120 + (clk * 4));
59 if (!ignore_en && !(sctl & 0x00000100)) 67 if (!ignore_en && !(sctl & 0x00000100))
60 return 0; 68 return 0;
61 69
62 switch (sctl & 0x00003000) { 70 switch (sctl & 0x00003000) {
63 case 0x00000000: 71 case 0x00000000:
64 return dev_priv->crystal; 72 return device->crystal;
65 case 0x00002000: 73 case 0x00002000:
66 if (sctl & 0x00000040) 74 if (sctl & 0x00000040)
67 return 108000; 75 return 108000;
@@ -78,12 +86,13 @@ read_clk(struct drm_device *dev, int clk, bool ignore_en)
78static u32 86static u32
79read_pll(struct drm_device *dev, int clk, u32 pll) 87read_pll(struct drm_device *dev, int clk, u32 pll)
80{ 88{
81 u32 ctrl = nv_rd32(dev, pll + 0); 89 struct nouveau_device *device = nouveau_dev(dev);
90 u32 ctrl = nv_rd32(device, pll + 0);
82 u32 sclk = 0, P = 1, N = 1, M = 1; 91 u32 sclk = 0, P = 1, N = 1, M = 1;
83 92
84 if (!(ctrl & 0x00000008)) { 93 if (!(ctrl & 0x00000008)) {
85 if (ctrl & 0x00000001) { 94 if (ctrl & 0x00000001) {
86 u32 coef = nv_rd32(dev, pll + 4); 95 u32 coef = nv_rd32(device, pll + 4);
87 M = (coef & 0x000000ff) >> 0; 96 M = (coef & 0x000000ff) >> 0;
88 N = (coef & 0x0000ff00) >> 8; 97 N = (coef & 0x0000ff00) >> 8;
89 P = (coef & 0x003f0000) >> 16; 98 P = (coef & 0x003f0000) >> 16;
@@ -111,7 +120,10 @@ struct creg {
111static int 120static int
112calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg) 121calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
113{ 122{
114 struct pll_lims limits; 123 struct nouveau_drm *drm = nouveau_drm(dev);
124 struct nouveau_device *device = nouveau_dev(dev);
125 struct nouveau_bios *bios = nouveau_bios(device);
126 struct nvbios_pll limits;
115 u32 oclk, sclk, sdiv; 127 u32 oclk, sclk, sdiv;
116 int P, N, M, diff; 128 int P, N, M, diff;
117 int ret; 129 int ret;
@@ -119,7 +131,7 @@ calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
119 reg->pll = 0; 131 reg->pll = 0;
120 reg->clk = 0; 132 reg->clk = 0;
121 if (!khz) { 133 if (!khz) {
122 NV_DEBUG(dev, "no clock for 0x%04x/0x%02x\n", pll, clk); 134 NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk);
123 return 0; 135 return 0;
124 } 136 }
125 137
@@ -154,14 +166,14 @@ calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
154 } 166 }
155 167
156 if (!pll) { 168 if (!pll) {
157 NV_ERROR(dev, "bad freq %02x: %d %d\n", clk, khz, sclk); 169 NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk);
158 return -ERANGE; 170 return -ERANGE;
159 } 171 }
160 172
161 break; 173 break;
162 } 174 }
163 175
164 ret = get_pll_limits(dev, pll, &limits); 176 ret = nvbios_pll_parse(bios, pll, &limits);
165 if (ret) 177 if (ret)
166 return ret; 178 return ret;
167 179
@@ -171,54 +183,60 @@ calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
171 183
172 ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P); 184 ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
173 if (ret >= 0) { 185 if (ret >= 0) {
174 reg->clk = nv_rd32(dev, 0x4120 + (clk * 4)); 186 reg->clk = nv_rd32(device, 0x4120 + (clk * 4));
175 reg->pll = (P << 16) | (N << 8) | M; 187 reg->pll = (P << 16) | (N << 8) | M;
176 } 188 }
189
177 return ret; 190 return ret;
178} 191}
179 192
180static void 193static void
181prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg) 194prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
182{ 195{
196 struct nouveau_device *device = nouveau_dev(dev);
197 struct nouveau_drm *drm = nouveau_drm(dev);
183 const u32 src0 = 0x004120 + (clk * 4); 198 const u32 src0 = 0x004120 + (clk * 4);
184 const u32 src1 = 0x004160 + (clk * 4); 199 const u32 src1 = 0x004160 + (clk * 4);
185 const u32 ctrl = pll + 0; 200 const u32 ctrl = pll + 0;
186 const u32 coef = pll + 4; 201 const u32 coef = pll + 4;
187 202
188 if (!reg->clk && !reg->pll) { 203 if (!reg->clk && !reg->pll) {
189 NV_DEBUG(dev, "no clock for %02x\n", clk); 204 NV_DEBUG(drm, "no clock for %02x\n", clk);
190 return; 205 return;
191 } 206 }
192 207
193 if (reg->pll) { 208 if (reg->pll) {
194 nv_mask(dev, src0, 0x00000101, 0x00000101); 209 nv_mask(device, src0, 0x00000101, 0x00000101);
195 nv_wr32(dev, coef, reg->pll); 210 nv_wr32(device, coef, reg->pll);
196 nv_mask(dev, ctrl, 0x00000015, 0x00000015); 211 nv_mask(device, ctrl, 0x00000015, 0x00000015);
197 nv_mask(dev, ctrl, 0x00000010, 0x00000000); 212 nv_mask(device, ctrl, 0x00000010, 0x00000000);
198 nv_wait(dev, ctrl, 0x00020000, 0x00020000); 213 nv_wait(device, ctrl, 0x00020000, 0x00020000);
199 nv_mask(dev, ctrl, 0x00000010, 0x00000010); 214 nv_mask(device, ctrl, 0x00000010, 0x00000010);
200 nv_mask(dev, ctrl, 0x00000008, 0x00000000); 215 nv_mask(device, ctrl, 0x00000008, 0x00000000);
201 nv_mask(dev, src1, 0x00000100, 0x00000000); 216 nv_mask(device, src1, 0x00000100, 0x00000000);
202 nv_mask(dev, src1, 0x00000001, 0x00000000); 217 nv_mask(device, src1, 0x00000001, 0x00000000);
203 } else { 218 } else {
204 nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk); 219 nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk);
205 nv_mask(dev, ctrl, 0x00000018, 0x00000018); 220 nv_mask(device, ctrl, 0x00000018, 0x00000018);
206 udelay(20); 221 udelay(20);
207 nv_mask(dev, ctrl, 0x00000001, 0x00000000); 222 nv_mask(device, ctrl, 0x00000001, 0x00000000);
208 nv_mask(dev, src0, 0x00000100, 0x00000000); 223 nv_mask(device, src0, 0x00000100, 0x00000000);
209 nv_mask(dev, src0, 0x00000001, 0x00000000); 224 nv_mask(device, src0, 0x00000001, 0x00000000);
210 } 225 }
211} 226}
212 227
213static void 228static void
214prog_clk(struct drm_device *dev, int clk, struct creg *reg) 229prog_clk(struct drm_device *dev, int clk, struct creg *reg)
215{ 230{
231 struct nouveau_device *device = nouveau_dev(dev);
232 struct nouveau_drm *drm = nouveau_drm(dev);
233
216 if (!reg->clk) { 234 if (!reg->clk) {
217 NV_DEBUG(dev, "no clock for %02x\n", clk); 235 NV_DEBUG(drm, "no clock for %02x\n", clk);
218 return; 236 return;
219 } 237 }
220 238
221 nv_mask(dev, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk); 239 nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
222} 240}
223 241
224int 242int
@@ -309,10 +327,11 @@ static bool
309nva3_pm_grcp_idle(void *data) 327nva3_pm_grcp_idle(void *data)
310{ 328{
311 struct drm_device *dev = data; 329 struct drm_device *dev = data;
330 struct nouveau_device *device = nouveau_dev(dev);
312 331
313 if (!(nv_rd32(dev, 0x400304) & 0x00000001)) 332 if (!(nv_rd32(device, 0x400304) & 0x00000001))
314 return true; 333 return true;
315 if (nv_rd32(dev, 0x400308) == 0x0050001c) 334 if (nv_rd32(device, 0x400308) == 0x0050001c)
316 return true; 335 return true;
317 return false; 336 return false;
318} 337}
@@ -320,85 +339,91 @@ nva3_pm_grcp_idle(void *data)
320static void 339static void
321mclk_precharge(struct nouveau_mem_exec_func *exec) 340mclk_precharge(struct nouveau_mem_exec_func *exec)
322{ 341{
323 nv_wr32(exec->dev, 0x1002d4, 0x00000001); 342 struct nouveau_device *device = nouveau_dev(exec->dev);
343 nv_wr32(device, 0x1002d4, 0x00000001);
324} 344}
325 345
326static void 346static void
327mclk_refresh(struct nouveau_mem_exec_func *exec) 347mclk_refresh(struct nouveau_mem_exec_func *exec)
328{ 348{
329 nv_wr32(exec->dev, 0x1002d0, 0x00000001); 349 struct nouveau_device *device = nouveau_dev(exec->dev);
350 nv_wr32(device, 0x1002d0, 0x00000001);
330} 351}
331 352
332static void 353static void
333mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable) 354mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
334{ 355{
335 nv_wr32(exec->dev, 0x100210, enable ? 0x80000000 : 0x00000000); 356 struct nouveau_device *device = nouveau_dev(exec->dev);
357 nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000);
336} 358}
337 359
338static void 360static void
339mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable) 361mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
340{ 362{
341 nv_wr32(exec->dev, 0x1002dc, enable ? 0x00000001 : 0x00000000); 363 struct nouveau_device *device = nouveau_dev(exec->dev);
364 nv_wr32(device, 0x1002dc, enable ? 0x00000001 : 0x00000000);
342} 365}
343 366
344static void 367static void
345mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec) 368mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
346{ 369{
347 volatile u32 post = nv_rd32(exec->dev, 0); (void)post; 370 struct nouveau_device *device = nouveau_dev(exec->dev);
371 volatile u32 post = nv_rd32(device, 0); (void)post;
348 udelay((nsec + 500) / 1000); 372 udelay((nsec + 500) / 1000);
349} 373}
350 374
351static u32 375static u32
352mclk_mrg(struct nouveau_mem_exec_func *exec, int mr) 376mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
353{ 377{
378 struct nouveau_device *device = nouveau_dev(exec->dev);
354 if (mr <= 1) 379 if (mr <= 1)
355 return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4)); 380 return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
356 if (mr <= 3) 381 if (mr <= 3)
357 return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4)); 382 return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
358 return 0; 383 return 0;
359} 384}
360 385
361static void 386static void
362mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data) 387mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
363{ 388{
364 struct drm_nouveau_private *dev_priv = exec->dev->dev_private; 389 struct nouveau_device *device = nouveau_dev(exec->dev);
365 390 struct nouveau_fb *pfb = nouveau_fb(device);
366 if (mr <= 1) { 391 if (mr <= 1) {
367 if (dev_priv->vram_rank_B) 392 if (pfb->ram.ranks > 1)
368 nv_wr32(exec->dev, 0x1002c8 + ((mr - 0) * 4), data); 393 nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data);
369 nv_wr32(exec->dev, 0x1002c0 + ((mr - 0) * 4), data); 394 nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data);
370 } else 395 } else
371 if (mr <= 3) { 396 if (mr <= 3) {
372 if (dev_priv->vram_rank_B) 397 if (pfb->ram.ranks > 1)
373 nv_wr32(exec->dev, 0x1002e8 + ((mr - 2) * 4), data); 398 nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data);
374 nv_wr32(exec->dev, 0x1002e0 + ((mr - 2) * 4), data); 399 nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data);
375 } 400 }
376} 401}
377 402
378static void 403static void
379mclk_clock_set(struct nouveau_mem_exec_func *exec) 404mclk_clock_set(struct nouveau_mem_exec_func *exec)
380{ 405{
381 struct drm_device *dev = exec->dev; 406 struct nouveau_device *device = nouveau_dev(exec->dev);
382 struct nva3_pm_state *info = exec->priv; 407 struct nva3_pm_state *info = exec->priv;
383 u32 ctrl; 408 u32 ctrl;
384 409
385 ctrl = nv_rd32(dev, 0x004000); 410 ctrl = nv_rd32(device, 0x004000);
386 if (!(ctrl & 0x00000008) && info->mclk.pll) { 411 if (!(ctrl & 0x00000008) && info->mclk.pll) {
387 nv_wr32(dev, 0x004000, (ctrl |= 0x00000008)); 412 nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
388 nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000); 413 nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
389 nv_wr32(dev, 0x004018, 0x00001000); 414 nv_wr32(device, 0x004018, 0x00001000);
390 nv_wr32(dev, 0x004000, (ctrl &= ~0x00000001)); 415 nv_wr32(device, 0x004000, (ctrl &= ~0x00000001));
391 nv_wr32(dev, 0x004004, info->mclk.pll); 416 nv_wr32(device, 0x004004, info->mclk.pll);
392 nv_wr32(dev, 0x004000, (ctrl |= 0x00000001)); 417 nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
393 udelay(64); 418 udelay(64);
394 nv_wr32(dev, 0x004018, 0x00005000 | info->r004018); 419 nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
395 udelay(20); 420 udelay(20);
396 } else 421 } else
397 if (!info->mclk.pll) { 422 if (!info->mclk.pll) {
398 nv_mask(dev, 0x004168, 0x003f3040, info->mclk.clk); 423 nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk);
399 nv_wr32(dev, 0x004000, (ctrl |= 0x00000008)); 424 nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
400 nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000); 425 nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
401 nv_wr32(dev, 0x004018, 0x0000d000 | info->r004018); 426 nv_wr32(device, 0x004018, 0x0000d000 | info->r004018);
402 } 427 }
403 428
404 if (info->rammap) { 429 if (info->rammap) {
@@ -410,67 +435,68 @@ mclk_clock_set(struct nouveau_mem_exec_func *exec)
410 (info->ramcfg[3] & 0x0f) << 16 | 435 (info->ramcfg[3] & 0x0f) << 16 |
411 (info->ramcfg[9] & 0x0f) | 436 (info->ramcfg[9] & 0x0f) |
412 0x80000000; 437 0x80000000;
413 nv_wr32(dev, 0x1005a0, unk5a0); 438 nv_wr32(device, 0x1005a0, unk5a0);
414 nv_wr32(dev, 0x1005a4, unk5a4); 439 nv_wr32(device, 0x1005a4, unk5a4);
415 nv_wr32(dev, 0x10f804, unk804); 440 nv_wr32(device, 0x10f804, unk804);
416 nv_mask(dev, 0x10053c, 0x00001000, 0x00000000); 441 nv_mask(device, 0x10053c, 0x00001000, 0x00000000);
417 } else { 442 } else {
418 nv_mask(dev, 0x10053c, 0x00001000, 0x00001000); 443 nv_mask(device, 0x10053c, 0x00001000, 0x00001000);
419 nv_mask(dev, 0x10f804, 0x80000000, 0x00000000); 444 nv_mask(device, 0x10f804, 0x80000000, 0x00000000);
420 nv_mask(dev, 0x100760, 0x22222222, info->r100760); 445 nv_mask(device, 0x100760, 0x22222222, info->r100760);
421 nv_mask(dev, 0x1007a0, 0x22222222, info->r100760); 446 nv_mask(device, 0x1007a0, 0x22222222, info->r100760);
422 nv_mask(dev, 0x1007e0, 0x22222222, info->r100760); 447 nv_mask(device, 0x1007e0, 0x22222222, info->r100760);
423 } 448 }
424 } 449 }
425 450
426 if (info->mclk.pll) { 451 if (info->mclk.pll) {
427 nv_mask(dev, 0x1110e0, 0x00088000, 0x00011000); 452 nv_mask(device, 0x1110e0, 0x00088000, 0x00011000);
428 nv_wr32(dev, 0x004000, (ctrl &= ~0x00000008)); 453 nv_wr32(device, 0x004000, (ctrl &= ~0x00000008));
429 } 454 }
430} 455}
431 456
432static void 457static void
433mclk_timing_set(struct nouveau_mem_exec_func *exec) 458mclk_timing_set(struct nouveau_mem_exec_func *exec)
434{ 459{
435 struct drm_device *dev = exec->dev; 460 struct nouveau_device *device = nouveau_dev(exec->dev);
436 struct nva3_pm_state *info = exec->priv; 461 struct nva3_pm_state *info = exec->priv;
437 struct nouveau_pm_level *perflvl = info->perflvl; 462 struct nouveau_pm_level *perflvl = info->perflvl;
438 int i; 463 int i;
439 464
440 for (i = 0; i < 9; i++) 465 for (i = 0; i < 9; i++)
441 nv_wr32(dev, 0x100220 + (i * 4), perflvl->timing.reg[i]); 466 nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]);
442 467
443 if (info->ramcfg) { 468 if (info->ramcfg) {
444 u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000; 469 u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
445 nv_mask(dev, 0x100200, 0x00001000, data); 470 nv_mask(device, 0x100200, 0x00001000, data);
446 } 471 }
447 472
448 if (info->ramcfg) { 473 if (info->ramcfg) {
449 u32 unk714 = nv_rd32(dev, 0x100714) & ~0xf0000010; 474 u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010;
450 u32 unk718 = nv_rd32(dev, 0x100718) & ~0x00000100; 475 u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100;
451 u32 unk71c = nv_rd32(dev, 0x10071c) & ~0x00000100; 476 u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100;
452 if ( (info->ramcfg[2] & 0x20)) 477 if ( (info->ramcfg[2] & 0x20))
453 unk714 |= 0xf0000000; 478 unk714 |= 0xf0000000;
454 if (!(info->ramcfg[2] & 0x04)) 479 if (!(info->ramcfg[2] & 0x04))
455 unk714 |= 0x00000010; 480 unk714 |= 0x00000010;
456 nv_wr32(dev, 0x100714, unk714); 481 nv_wr32(device, 0x100714, unk714);
457 482
458 if (info->ramcfg[2] & 0x01) 483 if (info->ramcfg[2] & 0x01)
459 unk71c |= 0x00000100; 484 unk71c |= 0x00000100;
460 nv_wr32(dev, 0x10071c, unk71c); 485 nv_wr32(device, 0x10071c, unk71c);
461 486
462 if (info->ramcfg[2] & 0x02) 487 if (info->ramcfg[2] & 0x02)
463 unk718 |= 0x00000100; 488 unk718 |= 0x00000100;
464 nv_wr32(dev, 0x100718, unk718); 489 nv_wr32(device, 0x100718, unk718);
465 490
466 if (info->ramcfg[2] & 0x10) 491 if (info->ramcfg[2] & 0x10)
467 nv_wr32(dev, 0x111100, 0x48000000); /*XXX*/ 492 nv_wr32(device, 0x111100, 0x48000000); /*XXX*/
468 } 493 }
469} 494}
470 495
471static void 496static void
472prog_mem(struct drm_device *dev, struct nva3_pm_state *info) 497prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
473{ 498{
499 struct nouveau_device *device = nouveau_dev(dev);
474 struct nouveau_mem_exec_func exec = { 500 struct nouveau_mem_exec_func exec = {
475 .dev = dev, 501 .dev = dev,
476 .precharge = mclk_precharge, 502 .precharge = mclk_precharge,
@@ -492,17 +518,17 @@ prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
492 info->r100760 = 0x22222222; 518 info->r100760 = 0x22222222;
493 } 519 }
494 520
495 ctrl = nv_rd32(dev, 0x004000); 521 ctrl = nv_rd32(device, 0x004000);
496 if (ctrl & 0x00000008) { 522 if (ctrl & 0x00000008) {
497 if (info->mclk.pll) { 523 if (info->mclk.pll) {
498 nv_mask(dev, 0x004128, 0x00000101, 0x00000101); 524 nv_mask(device, 0x004128, 0x00000101, 0x00000101);
499 nv_wr32(dev, 0x004004, info->mclk.pll); 525 nv_wr32(device, 0x004004, info->mclk.pll);
500 nv_wr32(dev, 0x004000, (ctrl |= 0x00000001)); 526 nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
501 nv_wr32(dev, 0x004000, (ctrl &= 0xffffffef)); 527 nv_wr32(device, 0x004000, (ctrl &= 0xffffffef));
502 nv_wait(dev, 0x004000, 0x00020000, 0x00020000); 528 nv_wait(device, 0x004000, 0x00020000, 0x00020000);
503 nv_wr32(dev, 0x004000, (ctrl |= 0x00000010)); 529 nv_wr32(device, 0x004000, (ctrl |= 0x00000010));
504 nv_wr32(dev, 0x004018, 0x00005000 | info->r004018); 530 nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
505 nv_wr32(dev, 0x004000, (ctrl |= 0x00000004)); 531 nv_wr32(device, 0x004000, (ctrl |= 0x00000004));
506 } 532 }
507 } else { 533 } else {
508 u32 ssel = 0x00000101; 534 u32 ssel = 0x00000101;
@@ -510,68 +536,67 @@ prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
510 ssel |= info->mclk.clk; 536 ssel |= info->mclk.clk;
511 else 537 else
512 ssel |= 0x00080000; /* 324MHz, shouldn't matter... */ 538 ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
513 nv_mask(dev, 0x004168, 0x003f3141, ctrl); 539 nv_mask(device, 0x004168, 0x003f3141, ctrl);
514 } 540 }
515 541
516 if (info->ramcfg) { 542 if (info->ramcfg) {
517 if (info->ramcfg[2] & 0x10) { 543 if (info->ramcfg[2] & 0x10) {
518 nv_mask(dev, 0x111104, 0x00000600, 0x00000000); 544 nv_mask(device, 0x111104, 0x00000600, 0x00000000);
519 } else { 545 } else {
520 nv_mask(dev, 0x111100, 0x40000000, 0x40000000); 546 nv_mask(device, 0x111100, 0x40000000, 0x40000000);
521 nv_mask(dev, 0x111104, 0x00000180, 0x00000000); 547 nv_mask(device, 0x111104, 0x00000180, 0x00000000);
522 } 548 }
523 } 549 }
524 if (info->rammap && !(info->rammap[4] & 0x02)) 550 if (info->rammap && !(info->rammap[4] & 0x02))
525 nv_mask(dev, 0x100200, 0x00000800, 0x00000000); 551 nv_mask(device, 0x100200, 0x00000800, 0x00000000);
526 nv_wr32(dev, 0x611200, 0x00003300); 552 nv_wr32(device, 0x611200, 0x00003300);
527 if (!(info->ramcfg[2] & 0x10)) 553 if (!(info->ramcfg[2] & 0x10))
528 nv_wr32(dev, 0x111100, 0x4c020000); /*XXX*/ 554 nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/
529 555
530 nouveau_mem_exec(&exec, info->perflvl); 556 nouveau_mem_exec(&exec, info->perflvl);
531 557
532 nv_wr32(dev, 0x611200, 0x00003330); 558 nv_wr32(device, 0x611200, 0x00003330);
533 if (info->rammap && (info->rammap[4] & 0x02)) 559 if (info->rammap && (info->rammap[4] & 0x02))
534 nv_mask(dev, 0x100200, 0x00000800, 0x00000800); 560 nv_mask(device, 0x100200, 0x00000800, 0x00000800);
535 if (info->ramcfg) { 561 if (info->ramcfg) {
536 if (info->ramcfg[2] & 0x10) { 562 if (info->ramcfg[2] & 0x10) {
537 nv_mask(dev, 0x111104, 0x00000180, 0x00000180); 563 nv_mask(device, 0x111104, 0x00000180, 0x00000180);
538 nv_mask(dev, 0x111100, 0x40000000, 0x00000000); 564 nv_mask(device, 0x111100, 0x40000000, 0x00000000);
539 } else { 565 } else {
540 nv_mask(dev, 0x111104, 0x00000600, 0x00000600); 566 nv_mask(device, 0x111104, 0x00000600, 0x00000600);
541 } 567 }
542 } 568 }
543 569
544 if (info->mclk.pll) { 570 if (info->mclk.pll) {
545 nv_mask(dev, 0x004168, 0x00000001, 0x00000000); 571 nv_mask(device, 0x004168, 0x00000001, 0x00000000);
546 nv_mask(dev, 0x004168, 0x00000100, 0x00000000); 572 nv_mask(device, 0x004168, 0x00000100, 0x00000000);
547 } else { 573 } else {
548 nv_mask(dev, 0x004000, 0x00000001, 0x00000000); 574 nv_mask(device, 0x004000, 0x00000001, 0x00000000);
549 nv_mask(dev, 0x004128, 0x00000001, 0x00000000); 575 nv_mask(device, 0x004128, 0x00000001, 0x00000000);
550 nv_mask(dev, 0x004128, 0x00000100, 0x00000000); 576 nv_mask(device, 0x004128, 0x00000100, 0x00000000);
551 } 577 }
552} 578}
553 579
554int 580int
555nva3_pm_clocks_set(struct drm_device *dev, void *pre_state) 581nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
556{ 582{
557 struct drm_nouveau_private *dev_priv = dev->dev_private; 583 struct nouveau_device *device = nouveau_dev(dev);
584 struct nouveau_drm *drm = nouveau_drm(dev);
558 struct nva3_pm_state *info = pre_state; 585 struct nva3_pm_state *info = pre_state;
559 unsigned long flags;
560 int ret = -EAGAIN; 586 int ret = -EAGAIN;
561 587
562 /* prevent any new grctx switches from starting */ 588 /* prevent any new grctx switches from starting */
563 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 589 nv_wr32(device, 0x400324, 0x00000000);
564 nv_wr32(dev, 0x400324, 0x00000000); 590 nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */
565 nv_wr32(dev, 0x400328, 0x0050001c); /* wait flag 0x1c */
566 /* wait for any pending grctx switches to complete */ 591 /* wait for any pending grctx switches to complete */
567 if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev)) { 592 if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) {
568 NV_ERROR(dev, "pm: ctxprog didn't go idle\n"); 593 NV_ERROR(drm, "pm: ctxprog didn't go idle\n");
569 goto cleanup; 594 goto cleanup;
570 } 595 }
571 /* freeze PFIFO */ 596 /* freeze PFIFO */
572 nv_mask(dev, 0x002504, 0x00000001, 0x00000001); 597 nv_mask(device, 0x002504, 0x00000001, 0x00000001);
573 if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) { 598 if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) {
574 NV_ERROR(dev, "pm: fifo didn't go idle\n"); 599 NV_ERROR(drm, "pm: fifo didn't go idle\n");
575 goto cleanup; 600 goto cleanup;
576 } 601 }
577 602
@@ -587,14 +612,13 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
587 612
588cleanup: 613cleanup:
589 /* unfreeze PFIFO */ 614 /* unfreeze PFIFO */
590 nv_mask(dev, 0x002504, 0x00000001, 0x00000000); 615 nv_mask(device, 0x002504, 0x00000001, 0x00000000);
591 /* restore ctxprog to normal */ 616 /* restore ctxprog to normal */
592 nv_wr32(dev, 0x400324, 0x00000000); 617 nv_wr32(device, 0x400324, 0x00000000);
593 nv_wr32(dev, 0x400328, 0x0070009c); /* set flag 0x1c */ 618 nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */
594 /* unblock it if necessary */ 619 /* unblock it if necessary */
595 if (nv_rd32(dev, 0x400308) == 0x0050001c) 620 if (nv_rd32(device, 0x400308) == 0x0050001c)
596 nv_mask(dev, 0x400824, 0x10000000, 0x10000000); 621 nv_mask(device, 0x400824, 0x10000000, 0x10000000);
597 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
598 kfree(info); 622 kfree(info);
599 return ret; 623 return ret;
600} 624}
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c
deleted file mode 100644
index dddf006f6d88..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_copy.c
+++ /dev/null
@@ -1,243 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include "drmP.h"
27#include "nouveau_drv.h"
28#include "nouveau_util.h"
29#include "nouveau_vm.h"
30#include "nouveau_ramht.h"
31#include "nvc0_copy.fuc.h"
32
33struct nvc0_copy_engine {
34 struct nouveau_exec_engine base;
35 u32 irq;
36 u32 pmc;
37 u32 fuc;
38 u32 ctx;
39};
40
41static int
42nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
43{
44 struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
45 struct drm_device *dev = chan->dev;
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nouveau_gpuobj *ramin = chan->ramin;
48 struct nouveau_gpuobj *ctx = NULL;
49 int ret;
50
51 ret = nouveau_gpuobj_new(dev, chan, 256, 256,
52 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
53 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
54 if (ret)
55 return ret;
56
57 nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
58 nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
59 dev_priv->engine.instmem.flush(dev);
60
61 chan->engctx[engine] = ctx;
62 return 0;
63}
64
65static int
66nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
67 u32 handle, u16 class)
68{
69 return 0;
70}
71
72static void
73nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
74{
75 struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
76 struct nouveau_gpuobj *ctx = chan->engctx[engine];
77 struct drm_device *dev = chan->dev;
78 u32 inst;
79
80 inst = (chan->ramin->vinst >> 12);
81 inst |= 0x40000000;
82
83 /* disable fifo access */
84 nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
85 /* mark channel as unloaded if it's currently active */
86 if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
87 nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
88 /* mark next channel as invalid if it's about to be loaded */
89 if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
90 nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
91 /* restore fifo access */
92 nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);
93
94 nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
95 nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
96 nouveau_gpuobj_ref(NULL, &ctx);
97
98 chan->engctx[engine] = ctx;
99}
100
101static int
102nvc0_copy_init(struct drm_device *dev, int engine)
103{
104 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
105 int i;
106
107 nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
108 nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
109 nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
110
111 nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
112 for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
113 nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);
114
115 nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
116 for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
117 if ((i & 0x3f) == 0)
118 nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
119 nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
120 }
121
122 nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
123 nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
124 nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
125 nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
126 return 0;
127}
128
129static int
130nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
131{
132 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
133
134 nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);
135
136 /* trigger fuc context unload */
137 nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
138 nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
139 nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
140 nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);
141
142 nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
143 return 0;
144}
145
146static struct nouveau_enum nvc0_copy_isr_error_name[] = {
147 { 0x0001, "ILLEGAL_MTHD" },
148 { 0x0002, "INVALID_ENUM" },
149 { 0x0003, "INVALID_BITFIELD" },
150 {}
151};
152
153static void
154nvc0_copy_isr(struct drm_device *dev, int engine)
155{
156 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
157 u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
158 u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
159 u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
160 u32 chid = nvc0_graph_isr_chid(dev, inst);
161 u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
162 u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
163 u32 mthd = (addr & 0x07ff) << 2;
164 u32 subc = (addr & 0x3800) >> 11;
165 u32 data = nv_rd32(dev, pcopy->fuc + 0x044);
166
167 if (stat & 0x00000040) {
168 NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
169 nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
170 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
171 chid, inst, subc, mthd, data);
172 nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
173 stat &= ~0x00000040;
174 }
175
176 if (stat) {
177 NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
178 nv_wr32(dev, pcopy->fuc + 0x004, stat);
179 }
180}
181
182static void
183nvc0_copy_isr_0(struct drm_device *dev)
184{
185 nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
186}
187
188static void
189nvc0_copy_isr_1(struct drm_device *dev)
190{
191 nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
192}
193
194static void
195nvc0_copy_destroy(struct drm_device *dev, int engine)
196{
197 struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
198
199 nouveau_irq_unregister(dev, pcopy->irq);
200
201 if (engine == NVOBJ_ENGINE_COPY0)
202 NVOBJ_ENGINE_DEL(dev, COPY0);
203 else
204 NVOBJ_ENGINE_DEL(dev, COPY1);
205 kfree(pcopy);
206}
207
208int
209nvc0_copy_create(struct drm_device *dev, int engine)
210{
211 struct nvc0_copy_engine *pcopy;
212
213 pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
214 if (!pcopy)
215 return -ENOMEM;
216
217 pcopy->base.destroy = nvc0_copy_destroy;
218 pcopy->base.init = nvc0_copy_init;
219 pcopy->base.fini = nvc0_copy_fini;
220 pcopy->base.context_new = nvc0_copy_context_new;
221 pcopy->base.context_del = nvc0_copy_context_del;
222 pcopy->base.object_new = nvc0_copy_object_new;
223
224 if (engine == 0) {
225 pcopy->irq = 5;
226 pcopy->pmc = 0x00000040;
227 pcopy->fuc = 0x104000;
228 pcopy->ctx = 0x0230;
229 nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
230 NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
231 NVOBJ_CLASS(dev, 0x90b5, COPY0);
232 } else {
233 pcopy->irq = 6;
234 pcopy->pmc = 0x00000080;
235 pcopy->fuc = 0x105000;
236 pcopy->ctx = 0x0240;
237 nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
238 NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
239 NVOBJ_CLASS(dev, 0x90b8, COPY1);
240 }
241
242 return 0;
243}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
deleted file mode 100644
index f704e942372e..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drv.h"
28#include "nouveau_drm.h"
29
30struct nvc0_fb_priv {
31 struct page *r100c10_page;
32 dma_addr_t r100c10;
33};
34
35static inline void
36nvc0_mfb_subp_isr(struct drm_device *dev, int unit, int subp)
37{
38 u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
39 u32 stat = nv_rd32(dev, subp_base + 0x020);
40
41 if (stat) {
42 NV_INFO(dev, "PMFB%d_SUBP%d: 0x%08x\n", unit, subp, stat);
43 nv_wr32(dev, subp_base + 0x020, stat);
44 }
45}
46
47static void
48nvc0_mfb_isr(struct drm_device *dev)
49{
50 u32 units = nv_rd32(dev, 0x00017c);
51 while (units) {
52 u32 subp, unit = ffs(units) - 1;
53 for (subp = 0; subp < 2; subp++)
54 nvc0_mfb_subp_isr(dev, unit, subp);
55 units &= ~(1 << unit);
56 }
57
58 /* we do something horribly wrong and upset PMFB a lot, so mask off
59 * interrupts from it after the first one until it's fixed
60 */
61 nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
62}
63
64static void
65nvc0_fb_destroy(struct drm_device *dev)
66{
67 struct drm_nouveau_private *dev_priv = dev->dev_private;
68 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
69 struct nvc0_fb_priv *priv = pfb->priv;
70
71 nouveau_irq_unregister(dev, 25);
72
73 if (priv->r100c10_page) {
74 pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
75 PCI_DMA_BIDIRECTIONAL);
76 __free_page(priv->r100c10_page);
77 }
78
79 kfree(priv);
80 pfb->priv = NULL;
81}
82
83static int
84nvc0_fb_create(struct drm_device *dev)
85{
86 struct drm_nouveau_private *dev_priv = dev->dev_private;
87 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
88 struct nvc0_fb_priv *priv;
89
90 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
91 if (!priv)
92 return -ENOMEM;
93 pfb->priv = priv;
94
95 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
96 if (!priv->r100c10_page) {
97 nvc0_fb_destroy(dev);
98 return -ENOMEM;
99 }
100
101 priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
102 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
103 if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
104 nvc0_fb_destroy(dev);
105 return -EFAULT;
106 }
107
108 nouveau_irq_register(dev, 25, nvc0_mfb_isr);
109 return 0;
110}
111
112int
113nvc0_fb_init(struct drm_device *dev)
114{
115 struct drm_nouveau_private *dev_priv = dev->dev_private;
116 struct nvc0_fb_priv *priv;
117 int ret;
118
119 if (!dev_priv->engine.fb.priv) {
120 ret = nvc0_fb_create(dev);
121 if (ret)
122 return ret;
123 }
124 priv = dev_priv->engine.fb.priv;
125
126 nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
127 return 0;
128}
129
130void
131nvc0_fb_takedown(struct drm_device *dev)
132{
133 nvc0_fb_destroy(dev);
134}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 797159e7b7a6..9dcd30f3e1e0 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -22,20 +22,16 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include "nouveau_drm.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h" 26#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fbcon.h" 27#include "nouveau_fbcon.h"
30#include "nouveau_mm.h"
31 28
32int 29int
33nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 30nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
34{ 31{
35 struct nouveau_fbdev *nfbdev = info->par; 32 struct nouveau_fbdev *nfbdev = info->par;
36 struct drm_device *dev = nfbdev->dev; 33 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
37 struct drm_nouveau_private *dev_priv = dev->dev_private; 34 struct nouveau_channel *chan = drm->channel;
38 struct nouveau_channel *chan = dev_priv->channel;
39 int ret; 35 int ret;
40 36
41 ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11); 37 ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
@@ -69,9 +65,8 @@ int
69nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 65nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
70{ 66{
71 struct nouveau_fbdev *nfbdev = info->par; 67 struct nouveau_fbdev *nfbdev = info->par;
72 struct drm_device *dev = nfbdev->dev; 68 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
73 struct drm_nouveau_private *dev_priv = dev->dev_private; 69 struct nouveau_channel *chan = drm->channel;
74 struct nouveau_channel *chan = dev_priv->channel;
75 int ret; 70 int ret;
76 71
77 ret = RING_SPACE(chan, 12); 72 ret = RING_SPACE(chan, 12);
@@ -98,9 +93,8 @@ int
98nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 93nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
99{ 94{
100 struct nouveau_fbdev *nfbdev = info->par; 95 struct nouveau_fbdev *nfbdev = info->par;
101 struct drm_device *dev = nfbdev->dev; 96 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
102 struct drm_nouveau_private *dev_priv = dev->dev_private; 97 struct nouveau_channel *chan = drm->channel;
103 struct nouveau_channel *chan = dev_priv->channel;
104 uint32_t width, dwords, *data = (uint32_t *)image->data; 98 uint32_t width, dwords, *data = (uint32_t *)image->data;
105 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); 99 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
106 uint32_t *palette = info->pseudo_palette; 100 uint32_t *palette = info->pseudo_palette;
@@ -157,12 +151,14 @@ nvc0_fbcon_accel_init(struct fb_info *info)
157{ 151{
158 struct nouveau_fbdev *nfbdev = info->par; 152 struct nouveau_fbdev *nfbdev = info->par;
159 struct drm_device *dev = nfbdev->dev; 153 struct drm_device *dev = nfbdev->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_channel *chan = dev_priv->channel;
162 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; 154 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
155 struct nouveau_drm *drm = nouveau_drm(dev);
156 struct nouveau_channel *chan = drm->channel;
157 struct nouveau_object *object;
163 int ret, format; 158 int ret, format;
164 159
165 ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d); 160 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
161 0x902d, NULL, 0, &object);
166 if (ret) 162 if (ret)
167 return ret; 163 return ret;
168 164
@@ -202,9 +198,6 @@ nvc0_fbcon_accel_init(struct fb_info *info)
202 198
203 BEGIN_NVC0(chan, NvSub2D, 0x0000, 1); 199 BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
204 OUT_RING (chan, 0x0000902d); 200 OUT_RING (chan, 0x0000902d);
205 BEGIN_NVC0(chan, NvSub2D, 0x0104, 2);
206 OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
207 OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
208 BEGIN_NVC0(chan, NvSub2D, 0x0290, 1); 201 BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
209 OUT_RING (chan, 0); 202 OUT_RING (chan, 0);
210 BEGIN_NVC0(chan, NvSub2D, 0x0888, 1); 203 BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 47ab388a606e..53299eac9676 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -22,29 +22,44 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include <core/object.h>
26#include "nouveau_drv.h" 26#include <core/client.h>
27#include <core/class.h>
28
29#include <engine/fifo.h>
30
31#include "nouveau_drm.h"
27#include "nouveau_dma.h" 32#include "nouveau_dma.h"
28#include "nouveau_fifo.h"
29#include "nouveau_ramht.h"
30#include "nouveau_fence.h" 33#include "nouveau_fence.h"
31 34
35#include "nv50_display.h"
36
32struct nvc0_fence_priv { 37struct nvc0_fence_priv {
33 struct nouveau_fence_priv base; 38 struct nouveau_fence_priv base;
34 struct nouveau_bo *bo; 39 struct nouveau_bo *bo;
40 u32 *suspend;
35}; 41};
36 42
37struct nvc0_fence_chan { 43struct nvc0_fence_chan {
38 struct nouveau_fence_chan base; 44 struct nouveau_fence_chan base;
39 struct nouveau_vma vma; 45 struct nouveau_vma vma;
46 struct nouveau_vma dispc_vma[4];
40}; 47};
41 48
49u64
50nvc0_fence_crtc(struct nouveau_channel *chan, int crtc)
51{
52 struct nvc0_fence_chan *fctx = chan->fence;
53 return fctx->dispc_vma[crtc].offset;
54}
55
42static int 56static int
43nvc0_fence_emit(struct nouveau_fence *fence) 57nvc0_fence_emit(struct nouveau_fence *fence)
44{ 58{
45 struct nouveau_channel *chan = fence->channel; 59 struct nouveau_channel *chan = fence->channel;
46 struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE]; 60 struct nvc0_fence_chan *fctx = chan->fence;
47 u64 addr = fctx->vma.offset + chan->id * 16; 61 struct nouveau_fifo_chan *fifo = (void *)chan->object;
62 u64 addr = fctx->vma.offset + fifo->chid * 16;
48 int ret; 63 int ret;
49 64
50 ret = RING_SPACE(chan, 5); 65 ret = RING_SPACE(chan, 5);
@@ -64,8 +79,9 @@ static int
64nvc0_fence_sync(struct nouveau_fence *fence, 79nvc0_fence_sync(struct nouveau_fence *fence,
65 struct nouveau_channel *prev, struct nouveau_channel *chan) 80 struct nouveau_channel *prev, struct nouveau_channel *chan)
66{ 81{
67 struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE]; 82 struct nvc0_fence_chan *fctx = chan->fence;
68 u64 addr = fctx->vma.offset + prev->id * 16; 83 struct nouveau_fifo_chan *fifo = (void *)prev->object;
84 u64 addr = fctx->vma.offset + fifo->chid * 16;
69 int ret; 85 int ret;
70 86
71 ret = RING_SPACE(chan, 5); 87 ret = RING_SPACE(chan, 5);
@@ -85,91 +101,135 @@ nvc0_fence_sync(struct nouveau_fence *fence,
85static u32 101static u32
86nvc0_fence_read(struct nouveau_channel *chan) 102nvc0_fence_read(struct nouveau_channel *chan)
87{ 103{
88 struct nvc0_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE); 104 struct nouveau_fifo_chan *fifo = (void *)chan->object;
89 return nouveau_bo_rd32(priv->bo, chan->id * 16/4); 105 struct nvc0_fence_priv *priv = chan->drm->fence;
106 return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
90} 107}
91 108
92static void 109static void
93nvc0_fence_context_del(struct nouveau_channel *chan, int engine) 110nvc0_fence_context_del(struct nouveau_channel *chan)
94{ 111{
95 struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine); 112 struct drm_device *dev = chan->drm->dev;
96 struct nvc0_fence_chan *fctx = chan->engctx[engine]; 113 struct nvc0_fence_priv *priv = chan->drm->fence;
114 struct nvc0_fence_chan *fctx = chan->fence;
115 int i;
116
117 if (nv_device(chan->drm->device)->card_type >= NV_D0) {
118 for (i = 0; i < dev->mode_config.num_crtc; i++) {
119 struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
120 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
121 }
122 } else
123 if (nv_device(chan->drm->device)->card_type >= NV_50) {
124 for (i = 0; i < dev->mode_config.num_crtc; i++) {
125 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
126 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
127 }
128 }
97 129
98 nouveau_bo_vma_del(priv->bo, &fctx->vma); 130 nouveau_bo_vma_del(priv->bo, &fctx->vma);
99 nouveau_fence_context_del(&fctx->base); 131 nouveau_fence_context_del(&fctx->base);
100 chan->engctx[engine] = NULL; 132 chan->fence = NULL;
101 kfree(fctx); 133 kfree(fctx);
102} 134}
103 135
104static int 136static int
105nvc0_fence_context_new(struct nouveau_channel *chan, int engine) 137nvc0_fence_context_new(struct nouveau_channel *chan)
106{ 138{
107 struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine); 139 struct nouveau_fifo_chan *fifo = (void *)chan->object;
140 struct nouveau_client *client = nouveau_client(fifo);
141 struct nvc0_fence_priv *priv = chan->drm->fence;
108 struct nvc0_fence_chan *fctx; 142 struct nvc0_fence_chan *fctx;
109 int ret; 143 int ret, i;
110 144
111 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); 145 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
112 if (!fctx) 146 if (!fctx)
113 return -ENOMEM; 147 return -ENOMEM;
114 148
115 nouveau_fence_context_new(&fctx->base); 149 nouveau_fence_context_new(&fctx->base);
116 150
117 ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma); 151 ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
118 if (ret) 152 if (ret)
119 nvc0_fence_context_del(chan, engine); 153 nvc0_fence_context_del(chan);
154
155 /* map display semaphore buffers into channel's vm */
156 for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
157 struct nouveau_bo *bo;
158 if (nv_device(chan->drm->device)->card_type >= NV_D0)
159 bo = nvd0_display_crtc_sema(chan->drm->dev, i);
160 else
161 bo = nv50_display_crtc_sema(chan->drm->dev, i);
120 162
121 nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000); 163 ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
164 }
165
166 nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
122 return ret; 167 return ret;
123} 168}
124 169
125static int 170static bool
126nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend) 171nvc0_fence_suspend(struct nouveau_drm *drm)
127{ 172{
128 return 0; 173 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
174 struct nvc0_fence_priv *priv = drm->fence;
175 int i;
176
177 priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
178 if (priv->suspend) {
179 for (i = 0; i <= pfifo->max; i++)
180 priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
181 }
182
183 return priv->suspend != NULL;
129} 184}
130 185
131static int 186static void
132nvc0_fence_init(struct drm_device *dev, int engine) 187nvc0_fence_resume(struct nouveau_drm *drm)
133{ 188{
134 return 0; 189 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
190 struct nvc0_fence_priv *priv = drm->fence;
191 int i;
192
193 if (priv->suspend) {
194 for (i = 0; i <= pfifo->max; i++)
195 nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
196 vfree(priv->suspend);
197 priv->suspend = NULL;
198 }
135} 199}
136 200
137static void 201static void
138nvc0_fence_destroy(struct drm_device *dev, int engine) 202nvc0_fence_destroy(struct nouveau_drm *drm)
139{ 203{
140 struct drm_nouveau_private *dev_priv = dev->dev_private; 204 struct nvc0_fence_priv *priv = drm->fence;
141 struct nvc0_fence_priv *priv = nv_engine(dev, engine);
142
143 nouveau_bo_unmap(priv->bo); 205 nouveau_bo_unmap(priv->bo);
144 nouveau_bo_ref(NULL, &priv->bo); 206 nouveau_bo_ref(NULL, &priv->bo);
145 dev_priv->eng[engine] = NULL; 207 drm->fence = NULL;
146 kfree(priv); 208 kfree(priv);
147} 209}
148 210
149int 211int
150nvc0_fence_create(struct drm_device *dev) 212nvc0_fence_create(struct nouveau_drm *drm)
151{ 213{
152 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); 214 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
153 struct drm_nouveau_private *dev_priv = dev->dev_private;
154 struct nvc0_fence_priv *priv; 215 struct nvc0_fence_priv *priv;
155 int ret; 216 int ret;
156 217
157 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 218 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
158 if (!priv) 219 if (!priv)
159 return -ENOMEM; 220 return -ENOMEM;
160 221
161 priv->base.engine.destroy = nvc0_fence_destroy; 222 priv->base.dtor = nvc0_fence_destroy;
162 priv->base.engine.init = nvc0_fence_init; 223 priv->base.suspend = nvc0_fence_suspend;
163 priv->base.engine.fini = nvc0_fence_fini; 224 priv->base.resume = nvc0_fence_resume;
164 priv->base.engine.context_new = nvc0_fence_context_new; 225 priv->base.context_new = nvc0_fence_context_new;
165 priv->base.engine.context_del = nvc0_fence_context_del; 226 priv->base.context_del = nvc0_fence_context_del;
166 priv->base.emit = nvc0_fence_emit; 227 priv->base.emit = nvc0_fence_emit;
167 priv->base.sync = nvc0_fence_sync; 228 priv->base.sync = nvc0_fence_sync;
168 priv->base.read = nvc0_fence_read; 229 priv->base.read = nvc0_fence_read;
169 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
170 230
171 ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM, 231 ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
172 0, 0, NULL, &priv->bo); 232 TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
173 if (ret == 0) { 233 if (ret == 0) {
174 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 234 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
175 if (ret == 0) 235 if (ret == 0)
@@ -179,6 +239,6 @@ nvc0_fence_create(struct drm_device *dev)
179 } 239 }
180 240
181 if (ret) 241 if (ret)
182 nvc0_fence_destroy(dev, NVOBJ_ENGINE_FENCE); 242 nvc0_fence_destroy(drm);
183 return ret; 243 return ret;
184} 244}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
deleted file mode 100644
index 7d85553d518c..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ /dev/null
@@ -1,476 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_mm.h"
29#include "nouveau_fifo.h"
30
31static void nvc0_fifo_isr(struct drm_device *);
32
33struct nvc0_fifo_priv {
34 struct nouveau_fifo_priv base;
35 struct nouveau_gpuobj *playlist[2];
36 int cur_playlist;
37 struct nouveau_vma user_vma;
38 int spoon_nr;
39};
40
41struct nvc0_fifo_chan {
42 struct nouveau_fifo_chan base;
43 struct nouveau_gpuobj *user;
44};
45
46static void
47nvc0_fifo_playlist_update(struct drm_device *dev)
48{
49 struct drm_nouveau_private *dev_priv = dev->dev_private;
50 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
51 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
52 struct nouveau_gpuobj *cur;
53 int i, p;
54
55 cur = priv->playlist[priv->cur_playlist];
56 priv->cur_playlist = !priv->cur_playlist;
57
58 for (i = 0, p = 0; i < 128; i++) {
59 if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
60 continue;
61 nv_wo32(cur, p + 0, i);
62 nv_wo32(cur, p + 4, 0x00000004);
63 p += 8;
64 }
65 pinstmem->flush(dev);
66
67 nv_wr32(dev, 0x002270, cur->vinst >> 12);
68 nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
69 if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
70 NV_ERROR(dev, "PFIFO - playlist update failed\n");
71}
72
73static int
74nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
75{
76 struct drm_device *dev = chan->dev;
77 struct drm_nouveau_private *dev_priv = dev->dev_private;
78 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
79 struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
80 struct nvc0_fifo_chan *fctx;
81 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
82 int ret, i;
83
84 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
85 if (!fctx)
86 return -ENOMEM;
87
88 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
89 priv->user_vma.offset + (chan->id * 0x1000),
90 PAGE_SIZE);
91 if (!chan->user) {
92 ret = -ENOMEM;
93 goto error;
94 }
95
96 /* allocate vram for control regs, map into polling area */
97 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
98 NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
99 if (ret)
100 goto error;
101
102 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
103 *(struct nouveau_mem **)fctx->user->node);
104
105 for (i = 0; i < 0x100; i += 4)
106 nv_wo32(chan->ramin, i, 0x00000000);
107 nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
108 nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
109 nv_wo32(chan->ramin, 0x10, 0x0000face);
110 nv_wo32(chan->ramin, 0x30, 0xfffff902);
111 nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
112 nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
113 upper_32_bits(ib_virt));
114 nv_wo32(chan->ramin, 0x54, 0x00000002);
115 nv_wo32(chan->ramin, 0x84, 0x20400000);
116 nv_wo32(chan->ramin, 0x94, 0x30000001);
117 nv_wo32(chan->ramin, 0x9c, 0x00000100);
118 nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
119 nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
120 nv_wo32(chan->ramin, 0xac, 0x0000001f);
121 nv_wo32(chan->ramin, 0xb8, 0xf8000000);
122 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
123 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
124 pinstmem->flush(dev);
125
126 nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
127 (chan->ramin->vinst >> 12));
128 nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
129 nvc0_fifo_playlist_update(dev);
130
131error:
132 if (ret)
133 priv->base.base.context_del(chan, engine);
134 return ret;
135}
136
137static void
138nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
139{
140 struct nvc0_fifo_chan *fctx = chan->engctx[engine];
141 struct drm_device *dev = chan->dev;
142
143 nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
144 nv_wr32(dev, 0x002634, chan->id);
145 if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
146 NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
147 nvc0_fifo_playlist_update(dev);
148 nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
149
150 nouveau_gpuobj_ref(NULL, &fctx->user);
151 if (chan->user) {
152 iounmap(chan->user);
153 chan->user = NULL;
154 }
155
156 chan->engctx[engine] = NULL;
157 kfree(fctx);
158}
159
/* nvc0_fifo_init - bring PFIFO up from reset.
 *
 * Resets the engine, enables all PSUBFIFO units advertised by the
 * hardware, routes engines to subfifos, unmasks interrupts, and restores
 * the context table entries of any channels that already have a FIFO
 * context (e.g. across suspend/resume).  Always returns 0.
 */
static int
nvc0_fifo_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
	struct nouveau_channel *chan;
	int i;

	/* reset PFIFO, enable all available PSUBFIFO areas */
	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(dev, 0x000204, 0xffffffff);
	nv_wr32(dev, 0x002204, 0xffffffff);

	/* 0x002204 reads back a bitmask of usable subfifos */
	priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
	NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);

	/* assign engines to subfifos */
	if (priv->spoon_nr >= 3) {
		nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
		nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
		nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
		nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
		nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
		nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PSUBFIFO[n]: clear pending interrupts, enable most sources */
	for (i = 0; i < priv->spoon_nr; i++) {
		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
	}

	nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
	/* point hardware at the BAR1 user-area window */
	nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);

	nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xbfffffff);

	/* restore PFIFO context table */
	for (i = 0; i < 128; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->engctx[engine])
			continue;

		nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
						 (chan->ramin->vinst >> 12));
		nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
	}
	nvc0_fifo_playlist_update(dev);

	return 0;
}
215
/* nvc0_fifo_fini - quiesce PFIFO before suspend/teardown.
 *
 * Kicks every still-enabled channel off the hardware, waiting for each
 * kickoff to be acknowledged, then masks all PFIFO interrupts.  The
 * @suspend argument is unused here (same path either way).
 *
 * Returns 0 on success, -EBUSY if a channel refuses to be kicked.
 */
static int
nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
	int i;

	for (i = 0; i < 128; i++) {
		/* skip channels that aren't enabled */
		if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
			continue;

		/* disable, then kick and wait for 0x2634 to echo the chid */
		nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
		nv_wr32(dev, 0x002634, i);
		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
				i, nv_rd32(dev, 0x002634));
			return -EBUSY;
		}
	}

	/* presumably the interrupt enable mask — disable everything */
	nv_wr32(dev, 0x002140, 0x00000000);
	return 0;
}
237
238
/* Decode table: VM fault source unit (bits of the fault status) -> name. */
struct nouveau_enum nvc0_fifo_fault_unit[] = {
	{ 0x00, "PGRAPH" },
	{ 0x03, "PEEPHOLE" },
	{ 0x04, "BAR1" },
	{ 0x05, "BAR3" },
	{ 0x07, "PFIFO" },
	{ 0x10, "PBSP" },
	{ 0x11, "PPPP" },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PVP" },
	{ 0x15, "PCOPY0" },
	{ 0x16, "PCOPY1" },
	{ 0x17, "PDAEMON" },
	{}
};
254
/* Decode table: VM fault reason (low nibble of fault status) -> name. */
struct nouveau_enum nvc0_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};
267
/* Decode table: HUB client id (when fault status bit 6 is set) -> name. */
struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PPPP" },
	{ 0x0d, "PBSP" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};
285
/* Decode table: GPC client id (when fault status bit 6 is clear) -> name. */
struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};
293
/* Bitfield decode table for PSUBFIFO interrupt status (0x040108). */
struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};
300
/* nvc0_fifo_isr_vm_fault - log a decoded VM fault for one fault unit.
 *
 * Reads the per-unit fault record (instance, 40-bit virtual address,
 * status) and prints a human-readable line using the decode tables
 * above.  Bit 6 of the status selects HUB vs GPC client decoding;
 * bit 7 distinguishes write from read faults.
 */
static void
nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
{
	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
	u32 client = (stat & 0x00001f00) >> 8;

	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
	printk("] from ");
	nouveau_enum_print(nvc0_fifo_fault_unit, unit);
	if (stat & 0x00000040) {
		printk("/");
		nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
	} else {
		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
		nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
	}
	/* instance address identifies the faulting channel */
	printk(" on channel 0x%010llx\n", (u64)inst << 12);
}
324
325static int
326nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
327{
328 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
329 struct drm_nouveau_private *dev_priv = dev->dev_private;
330 struct nouveau_channel *chan = NULL;
331 unsigned long flags;
332 int ret = -EINVAL;
333
334 spin_lock_irqsave(&dev_priv->channels.lock, flags);
335 if (likely(chid >= 0 && chid < priv->base.channels)) {
336 chan = dev_priv->channels.ptr[chid];
337 if (likely(chan))
338 ret = nouveau_finish_page_flip(chan, NULL);
339 }
340 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
341 return ret;
342}
343
344static void
345nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
346{
347 u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
348 u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
349 u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
350 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
351 u32 subc = (addr & 0x00070000);
352 u32 mthd = (addr & 0x00003ffc);
353 u32 show = stat;
354
355 if (stat & 0x00200000) {
356 if (mthd == 0x0054) {
357 if (!nvc0_fifo_page_flip(dev, chid))
358 show &= ~0x00200000;
359 }
360 }
361
362 if (show) {
363 NV_INFO(dev, "PFIFO%d:", unit);
364 nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
365 NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
366 unit, chid, subc, mthd, data);
367 }
368
369 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
370 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
371}
372
/* nvc0_fifo_isr - top-level PFIFO interrupt handler.
 *
 * Dispatches each pending status bit: 0x10000000 fans out to per-unit
 * VM fault handlers, 0x20000000 fans out to per-subfifo handlers, and
 * anything left unhandled is logged, acked, and has interrupts masked
 * off entirely to avoid an interrupt storm.
 */
static void
nvc0_fifo_isr(struct drm_device *dev)
{
	u32 stat = nv_rd32(dev, 0x002100);

	if (stat & 0x00000100) {
		NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
		nv_wr32(dev, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x10000000) {
		/* one bit per faulting VM unit */
		u32 units = nv_rd32(dev, 0x00259c);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nvc0_fifo_isr_vm_fault(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x00259c, units);
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		/* one bit per interrupting PSUBFIFO */
		u32 units = nv_rd32(dev, 0x0025a0);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nvc0_fifo_isr_subfifo_intr(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x0025a0, units);
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
		/* NOTE(review): mask 0 / value 0 is a read-modify-write of
		 * 0x2a00 with no change — presumably the read itself acks;
		 * confirm against hw docs */
		nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
		stat &= ~0x40000000;
	}

	if (stat) {
		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
		nv_wr32(dev, 0x002100, stat);
		/* disable all PFIFO interrupts to stop the storm */
		nv_wr32(dev, 0x002140, 0);
	}
}
424
/* nvc0_fifo_destroy - free all PFIFO engine resources.
 *
 * Releases the BAR1 user-area VM allocation and both playlist buffers,
 * unhooks the engine from dev_priv, and frees the priv struct.  Also
 * used as the error-cleanup path of nvc0_fifo_create(), so every
 * release here must tolerate a NULL/unallocated member.
 */
static void
nvc0_fifo_destroy(struct drm_device *dev, int engine)
{
	struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_vm_put(&priv->user_vma);
	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);

	dev_priv->eng[engine] = NULL;
	kfree(priv);
}
438
/* nvc0_fifo_create - allocate and register the NVC0 PFIFO engine.
 *
 * Sets up the engine vtable, allocates the two playlist buffers and the
 * per-channel user-area window in BAR1, and registers the interrupt
 * handler (irq line 8).  On any failure the engine's destroy hook is
 * invoked, which unwinds whatever was allocated so far.
 *
 * Returns 0 on success, negative errno on failure.
 */
int
nvc0_fifo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_fifo_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.base.destroy = nvc0_fifo_destroy;
	priv->base.base.init = nvc0_fifo_init;
	priv->base.base.fini = nvc0_fifo_fini;
	priv->base.base.context_new = nvc0_fifo_context_new;
	priv->base.base.context_del = nvc0_fifo_context_del;
	priv->base.channels = 128;
	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;

	/* double-buffered playlist: updates alternate between the two */
	ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
	if (ret)
		goto error;

	/* one 4KiB user-area page per channel, mapped through BAR1 */
	ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
	if (ret)
		goto error;

	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
	/* success path falls through with ret == 0, so no cleanup runs */
error:
	if (ret)
		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
	return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
deleted file mode 100644
index 2a01e6e47724..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ /dev/null
@@ -1,897 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include <linux/module.h>
27
28#include "drmP.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_mm.h"
32#include "nouveau_fifo.h"
33
34#include "nvc0_graph.h"
35#include "nvc0_grhub.fuc.h"
36#include "nvc0_grgpc.fuc.h"
37
/* nvc0_graph_ctxctl_debug_unit - dump one ctxctl falcon's state.
 *
 * @base: MMIO base of the ctxctl unit (HUB at 0x409000, or a GPC's
 *        unit at 0x502000 + gpc * 0x8000).
 * Prints the done register (+0x400) and the eight status words at
 * +0x800..+0x81c.
 */
static void
nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
{
	NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
		nv_rd32(dev, base + 0x400));
	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
		nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
	NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
		nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
		nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
}
50
51static void
52nvc0_graph_ctxctl_debug(struct drm_device *dev)
53{
54 u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
55 u32 gpc;
56
57 nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
58 for (gpc = 0; gpc < gpcnr; gpc++)
59 nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
60}
61
/* nvc0_graph_load_context - ask the HUB falcon to bind a channel's
 * graphics context (method 0x03 with the channel's instance address).
 *
 * Waits for bit 4 of 0x409800 to acknowledge; logs an error on timeout
 * but still returns 0 (callers treat this as best-effort).
 */
static int
nvc0_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	nv_wr32(dev, 0x409840, 0x00000030);
	nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
	nv_wr32(dev, 0x409504, 0x00000003);
	if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
		NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");

	return 0;
}
75
/* nvc0_graph_unload_context_to - ask the HUB falcon to save the current
 * graphics context into @chan (an instance address, method 0x09).
 *
 * Returns 0 on success, -EBUSY if the falcon does not acknowledge
 * (bit 0 of 0x409800 failing to clear) within the wait timeout.
 */
static int
nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
{
	nv_wr32(dev, 0x409840, 0x00000003);
	nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
	nv_wr32(dev, 0x409504, 0x00000009);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
		return -EBUSY;
	}

	return 0;
}
89
/* nvc0_graph_construct_context - build the "golden" context image.
 *
 * Runs once for the first channel: binds the channel's context to the
 * ctxctl microcode (internal ucode path when !nouveau_ctxfw, external
 * firmware path otherwise), generates the initial context via
 * nvc0_grctx_generate(), saves it back, then snapshots the whole image
 * into a kmalloc'd array stored in priv->grctx_vals.  Later channels
 * are initialised by copying this snapshot (see nvc0_graph_context_new).
 *
 * Returns 0 on success, negative errno on failure; the snapshot buffer
 * is freed on every error path.
 */
static int
nvc0_graph_construct_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	int ret, i;
	u32 *ctx;

	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	if (!nouveau_ctxfw) {
		/* internal ucode: HUB_SET_CHAN (method 0x01) */
		nv_wr32(dev, 0x409840, 0x80000000);
		nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
		nv_wr32(dev, 0x409504, 0x00000001);
		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
			NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
			nvc0_graph_ctxctl_debug(dev);
			ret = -EBUSY;
			goto err;
		}
	} else {
		nvc0_graph_load_context(chan);

		/* clear context control words in the image */
		nv_wo32(grch->grctx, 0x1c, 1);
		nv_wo32(grch->grctx, 0x20, 0);
		nv_wo32(grch->grctx, 0x28, 0);
		nv_wo32(grch->grctx, 0x2c, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	ret = nvc0_grctx_generate(chan);
	if (ret)
		goto err;

	if (!nouveau_ctxfw) {
		/* internal ucode: HUB_CTX_SAVE (method 0x02) */
		nv_wr32(dev, 0x409840, 0x80000000);
		nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
		nv_wr32(dev, 0x409504, 0x00000002);
		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
			NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
			nvc0_graph_ctxctl_debug(dev);
			ret = -EBUSY;
			goto err;
		}
	} else {
		ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
		if (ret)
			goto err;
	}

	/* snapshot the generated image word-by-word */
	for (i = 0; i < priv->grctx_size; i += 4)
		ctx[i / 4] = nv_ro32(grch->grctx, i);

	priv->grctx_vals = ctx;
	return 0;

err:
	kfree(ctx);
	return ret;
}
154
/* nvc0_graph_create_context_mmio_list - build per-channel mmio patch list.
 *
 * Allocates the channel's private graph buffers (three VM-mapped
 * objects of unknown purpose, named after the registers they back) plus
 * the mmio list object itself, then fills the list with
 * (register, value) pairs that the ctxctl ucode replays when the
 * channel's context is loaded.  The NVC1 (0xc1) chipset takes a
 * different magic sequence with an extra per-TP register (0x544).
 *
 * Returns 0 on success, negative errno if any allocation fails (the
 * caller's error path frees whatever was allocated).
 */
static int
nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
{
	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i = 0, gpc, tp, ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
				 &grch->unk408004);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
				 &grch->unk40800c);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
				 &grch->unk418810);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
				 &grch->mmio);
	if (ret)
		return ret;


	/* list entries are (register, value) word pairs; i counts words */
	nv_wo32(grch->mmio, i++ * 4, 0x00408004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408008);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408010);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418810);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
	nv_wo32(grch->mmio, i++ * 4, 0x00419848);
	nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);

	nv_wo32(grch->mmio, i++ * 4, 0x00419004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00419008);
	nv_wo32(grch->mmio, i++ * 4, 0x00000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418808);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	if (dev_priv->chipset != 0xc1) {
		u32 magic = 0x02180000;
		nv_wo32(grch->mmio, i++ * 4, 0x00405830);
		nv_wo32(grch->mmio, i++ * 4, magic);
		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
				u32 reg = TP_UNIT(gpc, tp, 0x520);
				nv_wo32(grch->mmio, i++ * 4, reg);
				nv_wo32(grch->mmio, i++ * 4, magic);
				magic += 0x0324;
			}
		}
	} else {
		/* NVC1 variant: different magic layout, extra 0x544 regs */
		u32 magic = 0x02180000;
		nv_wo32(grch->mmio, i++ * 4, 0x00405830);
		nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
		nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
		nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
				u32 reg = TP_UNIT(gpc, tp, 0x520);
				nv_wo32(grch->mmio, i++ * 4, reg);
				nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
				magic += 0x0324;
			}
			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
				u32 reg = TP_UNIT(gpc, tp, 0x544);
				nv_wo32(grch->mmio, i++ * 4, reg);
				nv_wo32(grch->mmio, i++ * 4, magic);
				magic += 0x0324;
			}
		}
	}

	/* two words per entry */
	grch->mmio_nr = i / 2;
	return 0;
}
248
/* nvc0_graph_context_new - create a channel's graphics engine context.
 *
 * Allocates the per-channel state (grch), the context image, and the
 * mmio patch list; links the context into the channel's ramin; lazily
 * builds the golden context image on the first call; then initialises
 * this channel's image from that snapshot and patches in the mmio-list
 * pointer (layout differs between the internal-ucode and external-
 * firmware paths).
 *
 * Returns 0 on success, negative errno on failure (context_del unwinds
 * partial allocations).
 */
static int
nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nvc0_graph_priv *priv = nv_engine(dev, engine);
	struct nvc0_graph_chan *grch;
	struct nouveau_gpuobj *grctx;
	int ret, i;

	grch = kzalloc(sizeof(*grch), GFP_KERNEL);
	if (!grch)
		return -ENOMEM;
	chan->engctx[NVOBJ_ENGINE_GR] = grch;

	ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
				 &grch->grctx);
	if (ret)
		goto error;
	grctx = grch->grctx;

	ret = nvc0_graph_create_context_mmio_list(chan);
	if (ret)
		goto error;

	/* point the channel's instance at its context image */
	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
	pinstmem->flush(dev);

	/* first channel ever: generate the golden context snapshot */
	if (!priv->grctx_vals) {
		ret = nvc0_graph_construct_context(chan);
		if (ret)
			goto error;
	}

	for (i = 0; i < priv->grctx_size; i += 4)
		nv_wo32(grctx, i, priv->grctx_vals[i / 4]);

	if (!nouveau_ctxfw) {
		/* internal ucode reads mmio list count/pointer at 0x00/0x04 */
		nv_wo32(grctx, 0x00, grch->mmio_nr);
		nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
	} else {
		/* external firmware uses a different context header layout */
		nv_wo32(grctx, 0xf4, 0);
		nv_wo32(grctx, 0xf8, 0);
		nv_wo32(grctx, 0x10, grch->mmio_nr);
		nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
		nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
		nv_wo32(grctx, 0x1c, 1);
		nv_wo32(grctx, 0x20, 0);
		nv_wo32(grctx, 0x28, 0);
		nv_wo32(grctx, 0x2c, 0);
	}
	pinstmem->flush(dev);
	return 0;

error:
	priv->base.context_del(chan, engine);
	return ret;
}
310
/* nvc0_graph_context_del - release a channel's graphics context.
 *
 * Drops all per-channel gpuobj references (safe on members that were
 * never allocated — also serves as context_new's error path) and frees
 * the grch struct via chan->engctx.
 */
static void
nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nvc0_graph_chan *grch = chan->engctx[engine];

	nouveau_gpuobj_ref(NULL, &grch->mmio);
	nouveau_gpuobj_ref(NULL, &grch->unk418810);
	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
	nouveau_gpuobj_ref(NULL, &grch->unk408004);
	nouveau_gpuobj_ref(NULL, &grch->grctx);
	chan->engctx[engine] = NULL;
	/* NOTE(review): grch itself is not freed here — kfree(grch) appears
	 * missing relative to the kzalloc in context_new; verify ownership */
}
323
/* nvc0_graph_object_new - object_new hook; Fermi needs no per-object
 * setup, so this is a no-op that always succeeds. */
static int
nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	return 0;
}
330
/* nvc0_graph_fini - engine fini hook; nothing to quiesce, always 0. */
static int
nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
	return 0;
}
336
/* nvc0_graph_init_obj418880 - program the GPC-broadcast 0x8880 block,
 * clearing the first registers and pointing 0x8b4/0x8b8 at the two
 * pre-filled scratch objects allocated in nvc0_graph_create(). */
static void
nvc0_graph_init_obj418880(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int i;

	nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
	nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
}
350
/* nvc0_graph_init_regs - write the fixed PGRAPH power-on register
 * values (0x4000xx block).  Values are hardware magic taken from
 * traces; the sequence/order is as observed and should not be changed
 * casually. */
static void
nvc0_graph_init_regs(struct drm_device *dev)
{
	nv_wr32(dev, 0x400080, 0x003083c2);
	nv_wr32(dev, 0x400088, 0x00006fe7);
	nv_wr32(dev, 0x40008c, 0x00000000);
	nv_wr32(dev, 0x400090, 0x00000030);
	nv_wr32(dev, 0x40013c, 0x013901f7);
	nv_wr32(dev, 0x400140, 0x00000100);
	nv_wr32(dev, 0x400144, 0x00000000);
	nv_wr32(dev, 0x400148, 0x00000110);
	nv_wr32(dev, 0x400138, 0x00000000);
	nv_wr32(dev, 0x400130, 0x00000000);
	nv_wr32(dev, 0x400134, 0x00000000);
	nv_wr32(dev, 0x400124, 0x00000002);
}
367
/* nvc0_graph_init_gpc_0 - first-stage GPC setup.
 *
 * Distributes the chip's TPs round-robin across GPCs into the packed
 * 4-bit-per-entry tables at GPC_BCAST 0x0980..0x098c, then programs the
 * per-GPC TP counts, the magic_not_rop_nr value, and the derived
 * magicgpc918 screen-distribution constant.
 */
static void
nvc0_graph_init_gpc_0(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
	u32 data[TP_MAX / 8];
	u8  tpnr[GPC_MAX];
	int i, gpc, tpc;

	nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */

	/*
	 *      TP      ROP UNKVAL(magic_not_rop_nr)
	 * 450: 4/0/0/0 2        3
	 * 460: 3/4/0/0 4        1
	 * 465: 3/4/4/0 4        7
	 * 470: 3/3/4/4 5        5
	 * 480: 3/4/4/4 6        6
	 */

	memset(data, 0x00, sizeof(data));
	memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
	/* round-robin: walk GPCs that still have TPs left, assigning each
	 * global TP index a (gpc-local) tpc number, 8 entries per word */
	for (i = 0, gpc = -1; i < priv->tp_total; i++) {
		do {
			gpc = (gpc + 1) % priv->gpc_nr;
		} while (!tpnr[gpc]);
		tpc = priv->tp_nr[gpc] - tpnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
	nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
	nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
	nv_wr32(dev, GPC_BCAST(0x098c), data[3]);

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
						    priv->tp_nr[gpc]);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
	nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
}
414
/* nvc0_graph_init_units - enable error reporting (0xc0000000) in the
 * various PGRAPH sub-units so traps are raised instead of silently
 * dropped. */
static void
nvc0_graph_init_units(struct drm_device *dev)
{
	nv_wr32(dev, 0x409c24, 0x000f0000);
	nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
	nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
	nv_wr32(dev, 0x408030, 0xc0000000);
	nv_wr32(dev, 0x40601c, 0xc0000000);
	nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
	nv_wr32(dev, 0x406018, 0xc0000000);
	nv_wr32(dev, 0x405840, 0xc0000000);
	nv_wr32(dev, 0x405844, 0x00ffffff);
	nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
	nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
}
430
/* nvc0_graph_init_gpc_1 - second-stage GPC setup: enable trap
 * reporting and unmask/clear interrupts in every GPC and each of its
 * TPs. */
static void
nvc0_graph_init_gpc_1(struct drm_device *dev)
{
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int gpc, tp;

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
			nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
		}
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}
}
455
456static void
457nvc0_graph_init_rop(struct drm_device *dev)
458{
459 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
460 int rop;
461
462 for (rop = 0; rop < priv->rop_nr; rop++) {
463 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
464 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
465 nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
466 nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
467 }
468}
469
/* nvc0_graph_init_fuc - upload external falcon firmware into one
 * ctxctl unit.
 *
 * @fuc_base: MMIO base of the target falcon (0x409000 HUB, 0x41a000 GPC)
 * @code/@data: firmware blobs loaded by nvc0_graph_create_fw()
 *
 * Data goes through the auto-incrementing port at +0x1c4; code through
 * +0x184, with the 64-word page index written to +0x188 at each page
 * boundary.
 */
static void
nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
		    struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
{
	int i;

	nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
	for (i = 0; i < data->size / 4; i++)
		nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);

	nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
	for (i = 0; i < code->size / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, fuc_base + 0x0188, i >> 6);
		nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
	}
}
487
/* nvc0_graph_init_ctxctl - load and start the context-control falcons.
 *
 * Two mutually exclusive paths:
 *  - !nouveau_ctxfw: upload the built-in HUB/GPC microcode arrays
 *    (nvc0_grhub_*, nvc0_grgpc_*), start the HUB falcon (which then
 *    initialises the GPC falcons itself), and read the context size
 *    from 0x409804.
 *  - nouveau_ctxfw: upload the four external firmware images, start
 *    both falcons, then issue the fuc09 requests 0x10 (query context
 *    size), 0x16 and 0x25 (unknown setup steps).
 *
 * Returns 0 on success, -EBUSY on any falcon handshake timeout.
 */
static int
nvc0_graph_init_ctxctl(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	u32 r000260;
	int i;

	if (!nouveau_ctxfw) {
		/* load HUB microcode (0x000260 bit 0 gates ucode upload) */
		r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
		nv_wr32(dev, 0x4091c0, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
			nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);

		nv_wr32(dev, 0x409180, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
			if ((i & 0x3f) == 0)
				nv_wr32(dev, 0x409188, i >> 6);
			nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
		}

		/* load GPC microcode */
		nv_wr32(dev, 0x41a1c0, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
			nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);

		nv_wr32(dev, 0x41a180, 0x01000000);
		for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
			if ((i & 0x3f) == 0)
				nv_wr32(dev, 0x41a188, i >> 6);
			nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
		}
		nv_wr32(dev, 0x000260, r000260);

		/* start HUB ucode running, it'll init the GPCs */
		nv_wr32(dev, 0x409800, dev_priv->chipset);
		nv_wr32(dev, 0x40910c, 0x00000000);
		nv_wr32(dev, 0x409100, 0x00000002);
		if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
			NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
			nvc0_graph_ctxctl_debug(dev);
			return -EBUSY;
		}

		priv->grctx_size = nv_rd32(dev, 0x409804);
		return 0;
	}

	/* load fuc microcode */
	r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
	nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
	nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
	nv_wr32(dev, 0x000260, r000260);

	/* start both of them running */
	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x41a10c, 0x00000000);
	nv_wr32(dev, 0x40910c, 0x00000000);
	nv_wr32(dev, 0x41a100, 0x00000002);
	nv_wr32(dev, 0x409100, 0x00000002);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
		NV_INFO(dev, "0x409800 wait failed\n");

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x7fffffff);
	nv_wr32(dev, 0x409504, 0x00000021);

	/* fuc09 request 0x10: returns the context size in 0x409800 */
	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000010);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
		return -EBUSY;
	}
	priv->grctx_size = nv_rd32(dev, 0x409800);

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000016);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
		return -EBUSY;
	}

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000025);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
		return -EBUSY;
	}

	return 0;
}
583
/* nvc0_graph_init - bring PGRAPH up from reset.
 *
 * Resets the engine, runs the staged register/GPC/unit/ROP setup
 * helpers, clears all pending interrupt/trap status, and finally loads
 * and starts the ctxctl falcons.
 *
 * Returns 0 on success, or the error from nvc0_graph_init_ctxctl()
 * (the only step that can fail).  The previous tail stored that result
 * in a local, tested it, and then returned 0 — simplified to a direct
 * tail return with identical behavior.
 */
static int
nvc0_graph_init(struct drm_device *dev, int engine)
{
	nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
	nv_mask(dev, 0x000200, 0x18001000, 0x18001000);

	nvc0_graph_init_obj418880(dev);
	nvc0_graph_init_regs(dev);
	/*nvc0_graph_init_unitplemented_magics(dev);*/
	nvc0_graph_init_gpc_0(dev);
	/*nvc0_graph_init_unitplemented_c242(dev);*/

	nv_wr32(dev, 0x400500, 0x00010001);
	nv_wr32(dev, 0x400100, 0xffffffff);
	nv_wr32(dev, 0x40013c, 0xffffffff);

	nvc0_graph_init_units(dev);
	nvc0_graph_init_gpc_1(dev);
	nvc0_graph_init_rop(dev);

	/* clear pending interrupts and traps */
	nv_wr32(dev, 0x400108, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, 0x400118, 0xffffffff);
	nv_wr32(dev, 0x400130, 0xffffffff);
	nv_wr32(dev, 0x40011c, 0xffffffff);
	nv_wr32(dev, 0x400134, 0xffffffff);
	nv_wr32(dev, 0x400054, 0x34ce3464);

	return nvc0_graph_init_ctxctl(dev);
}
620
/* nvc0_graph_isr_chid - map a context instance address to a channel id.
 *
 * Scans the channel table under the channel lock for a channel whose
 * ramin instance matches @inst.  Returns the channel id, or
 * pfifo->channels (one past the last valid id) if no channel matches.
 */
int
nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
{
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (i = 0; i < pfifo->channels; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->ramin)
			continue;

		if (inst == chan->ramin->vinst)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return i;
}
642
/* nvc0_graph_ctxctl_isr - handle an interrupt raised by the ctxctl
 * ucode: log known conditions (ucode error, watchdog), dump falcon
 * state for diagnosis, and ack the status. */
static void
nvc0_graph_ctxctl_isr(struct drm_device *dev)
{
	u32 ustat = nv_rd32(dev, 0x409c18);

	if (ustat & 0x00000001)
		NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
	if (ustat & 0x00080000)
		NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
	if (ustat & ~0x00080001)
		NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);

	nvc0_graph_ctxctl_debug(dev);
	nv_wr32(dev, 0x409c20, ustat);
}
658
/* nvc0_graph_isr - top-level PGRAPH interrupt handler.
 *
 * Captures the trapped method context (channel instance -> chid,
 * subchannel, class, method, data), then dispatches each status bit:
 * illegal method (tried as a software method first), illegal class,
 * data error, trap, and ctxctl.  Anything left is logged and acked.
 * The final 0x400500 write re-enables PGRAPH fifo access.
 */
static void
nvc0_graph_isr(struct drm_device *dev)
{
	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
	u32 chid = nvc0_graph_isr_chid(dev, inst);
	u32 stat = nv_rd32(dev, 0x400100);
	u32 addr = nv_rd32(dev, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nv_rd32(dev, 0x400708);
	u32 code = nv_rd32(dev, 0x400110);
	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));

	if (stat & 0x00000010) {
		/* ILLEGAL_MTHD: give software methods a chance first */
		if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
			NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
				     "subc %d class 0x%04x mthd 0x%04x "
				     "data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
		}
		nv_wr32(dev, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000020) {
		NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
			chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00000020);
		stat &= ~0x00000020;
	}

	if (stat & 0x00100000) {
		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
		nouveau_enum_print(nv50_data_error_names, code);
		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
		       "mthd 0x%04x data 0x%08x\n",
		       chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		u32 trap = nv_rd32(dev, 0x400108);
		NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
		nv_wr32(dev, 0x400108, trap);
		nv_wr32(dev, 0x400100, 0x00200000);
		stat &= ~0x00200000;
	}

	if (stat & 0x00080000) {
		nvc0_graph_ctxctl_isr(dev);
		nv_wr32(dev, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
		nv_wr32(dev, 0x400100, stat);
	}

	nv_wr32(dev, 0x400500, 0x00010001);
}
722
723static int
724nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
725 struct nvc0_graph_fuc *fuc)
726{
727 struct drm_nouveau_private *dev_priv = dev->dev_private;
728 const struct firmware *fw;
729 char f[32];
730 int ret;
731
732 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
733 ret = request_firmware(&fw, f, &dev->pdev->dev);
734 if (ret) {
735 snprintf(f, sizeof(f), "nouveau/%s", fwname);
736 ret = request_firmware(&fw, f, &dev->pdev->dev);
737 if (ret) {
738 NV_ERROR(dev, "failed to load %s\n", fwname);
739 return ret;
740 }
741 }
742
743 fuc->size = fw->size;
744 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
745 release_firmware(fw);
746 return (fuc->data != NULL) ? 0 : -ENOMEM;
747}
748
749static void
750nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
751{
752 if (fuc->data) {
753 kfree(fuc->data);
754 fuc->data = NULL;
755 }
756}
757
758static void
759nvc0_graph_destroy(struct drm_device *dev, int engine)
760{
761 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
762
763 if (nouveau_ctxfw) {
764 nvc0_graph_destroy_fw(&priv->fuc409c);
765 nvc0_graph_destroy_fw(&priv->fuc409d);
766 nvc0_graph_destroy_fw(&priv->fuc41ac);
767 nvc0_graph_destroy_fw(&priv->fuc41ad);
768 }
769
770 nouveau_irq_unregister(dev, 12);
771
772 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
773 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
774
775 if (priv->grctx_vals)
776 kfree(priv->grctx_vals);
777
778 NVOBJ_ENGINE_DEL(dev, GR);
779 kfree(priv);
780}
781
/* nvc0_graph_create - allocate and register the NVC0 PGRAPH engine.
 *
 * Bails out (returning 0, engine simply absent) on unsupported
 * chipsets.  Otherwise sets up the engine vtable, registers the
 * interrupt handler (irq line 12), optionally loads external ctxctl
 * firmware, allocates the two pre-filled scratch objects referenced by
 * init_obj418880, probes the GPC/TP/ROP topology, selects the
 * chipset-specific magic_not_rop_nr, and registers the object classes.
 *
 * Returns 0 on success, negative errno on failure.
 */
int
nvc0_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_graph_priv *priv;
	int ret, gpc, i;
	u32 fermi;

	fermi = nvc0_graph_class(dev);
	if (!fermi) {
		NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.destroy = nvc0_graph_destroy;
	priv->base.init = nvc0_graph_init;
	priv->base.fini = nvc0_graph_fini;
	priv->base.context_new = nvc0_graph_context_new;
	priv->base.context_del = nvc0_graph_context_del;
	priv->base.object_new = nvc0_graph_object_new;

	NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
	nouveau_irq_register(dev, 12, nvc0_graph_isr);

	if (nouveau_ctxfw) {
		NV_INFO(dev, "PGRAPH: using external firmware\n");
		if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
		    nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
		    nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
		    nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
			/* NOTE(review): ret = 0 here means firmware failure
			 * destroys the engine but reports success to the
			 * caller (driver continues without PGRAPH) — confirm
			 * this is intentional */
			ret = 0;
			goto error;
		}
	}

	/* scratch objects referenced by GPC_BCAST 0x08b4/0x08b8 */
	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
	if (ret)
		goto error;

	for (i = 0; i < 0x1000; i += 4) {
		nv_wo32(priv->unk4188b4, i, 0x00000010);
		nv_wo32(priv->unk4188b8, i, 0x00000010);
	}

	/* probe topology: GPC/ROP counts, then TPs per GPC */
	priv->gpc_nr  =  nv_rd32(dev, 0x409604) & 0x0000001f;
	priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
		priv->tp_total += priv->tp_nr[gpc];
	}

	/*XXX: these need figuring out... */
	switch (dev_priv->chipset) {
	case 0xc0:
		if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
			priv->magic_not_rop_nr = 0x07;
		} else
		if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
			priv->magic_not_rop_nr = 0x05;
		} else
		if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
			priv->magic_not_rop_nr = 0x06;
		}
		break;
	case 0xc3: /* 450, 4/0/0/0, 2 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xc4: /* 460, 3/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x01;
		break;
	case 0xc1: /* 2/0/0/0, 1 */
		priv->magic_not_rop_nr = 0x01;
		break;
	case 0xc8: /* 4/4/3/4, 5 */
		priv->magic_not_rop_nr = 0x06;
		break;
	case 0xce: /* 4/4/0/0, 4 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xcf: /* 4/0/0/0, 3 */
		priv->magic_not_rop_nr = 0x03;
		break;
	case 0xd9: /* 1/0/0/0, 1 */
		priv->magic_not_rop_nr = 0x01;
		break;
	}

	if (!priv->magic_not_rop_nr) {
		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
			 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
			 priv->tp_nr[3], priv->rop_nr);
		priv->magic_not_rop_nr = 0x00;
	}

	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
	if (fermi >= 0x9197)
		NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
	if (fermi >= 0x9297)
		NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
	NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
	return 0;

error:
	nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
	return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
deleted file mode 100644
index 91d44ea662d9..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ /dev/null
@@ -1,97 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NVC0_GRAPH_H__
26#define __NVC0_GRAPH_H__
27
/* Hardware topology limits for NVC0-family PGRAPH. */
#define GPC_MAX 4
#define TP_MAX 32

/*
 * Register-offset helpers for per-unit MMIO blocks.
 * ROP units live at 0x410000 with a 0x400 stride; GPCs at 0x500000 with
 * a 0x8000 stride; each TP sits inside its GPC block at a 0x800 stride.
 * The *_BCAST variants address the broadcast alias that mirrors a write
 * to every unit at once.
 */
#define ROP_BCAST(r) (0x408800 + (r))
#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
#define GPC_BCAST(r) (0x418000 + (r))
#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
#define TP_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))

37struct nvc0_graph_fuc {
38 u32 *data;
39 u32 size;
40};
41
42struct nvc0_graph_priv {
43 struct nouveau_exec_engine base;
44
45 struct nvc0_graph_fuc fuc409c;
46 struct nvc0_graph_fuc fuc409d;
47 struct nvc0_graph_fuc fuc41ac;
48 struct nvc0_graph_fuc fuc41ad;
49
50 u8 gpc_nr;
51 u8 rop_nr;
52 u8 tp_nr[GPC_MAX];
53 u8 tp_total;
54
55 u32 grctx_size;
56 u32 *grctx_vals;
57 struct nouveau_gpuobj *unk4188b4;
58 struct nouveau_gpuobj *unk4188b8;
59
60 u8 magic_not_rop_nr;
61};
62
/* Per-channel PGRAPH state: context buffer plus assorted scratch objects. */
struct nvc0_graph_chan {
	struct nouveau_gpuobj *grctx;		/* the channel's graph context */
	struct nouveau_gpuobj *unk408004;	/* 0x418810 too */
	struct nouveau_gpuobj *unk40800c;	/* 0x419004 too */
	struct nouveau_gpuobj *unk418810;	/* 0x419848 too */
	struct nouveau_gpuobj *mmio;		/* list of (reg, value) pairs */
	int mmio_nr;				/* entries used in mmio */
};
71
72int nvc0_grctx_generate(struct nouveau_channel *);
73
74/* nvc0_graph.c uses this also to determine supported chipsets */
75static inline u32
76nvc0_graph_class(struct drm_device *dev)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79
80 switch (dev_priv->chipset) {
81 case 0xc0:
82 case 0xc3:
83 case 0xc4:
84 case 0xce: /* guess, mmio trace shows only 0x9097 state */
85 case 0xcf: /* guess, mmio trace shows only 0x9097 state */
86 return 0x9097;
87 case 0xc1:
88 return 0x9197;
89 case 0xc8:
90 case 0xd9:
91 return 0x9297;
92 default:
93 return 0;
94 }
95}
96
97#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
deleted file mode 100644
index de77842b31c0..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ /dev/null
@@ -1,2878 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28#include "nvc0_graph.h"
29
30static void
31nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
32{
33 nv_wr32(dev, 0x400204, data);
34 nv_wr32(dev, 0x400200, icmd);
35 while (nv_rd32(dev, 0x400700) & 2) {}
36}
37
38static void
39nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
40{
41 nv_wr32(dev, 0x40448c, data);
42 nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
43}
44
45static void
46nvc0_grctx_generate_9097(struct drm_device *dev)
47{
48 u32 fermi = nvc0_graph_class(dev);
49 u32 mthd;
50
51 nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
52 nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
53 nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
54 nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
55 nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
56 nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
57 nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
58 nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
59 nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
60 nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
61 nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
62 nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
63 nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
64 nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
65 nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
66 nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
67 nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
68 nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
69 nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
70 nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
71 nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
72 nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
73 nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
74 nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
75 nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
76 nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
77 nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
78 nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
79 nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
80 nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
81 nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
82 nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
83 nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
84 nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
85 nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
86 nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
87 nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
88 nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
89 nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
90 nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
91 nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
92 nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
93 nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
94 nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
95 nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
96 nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
97 nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
98 nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
99 nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
100 nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
101 nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
102 nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
103 nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
104 nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
105 nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
106 nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
107 nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
108 nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
109 nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
110 nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
111 nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
112 nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
113 nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
114 nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
115 nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
116 nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
117 nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
118 nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
119 nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
120 nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
121 nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
122 nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
123 nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
124 nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
125 nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
126 nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
127 nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
128 nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
129 nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
130 nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
131 nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
132 nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
133 nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
134 nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
135 nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
136 nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
137 nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
138 nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
139 nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
140 nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
141 nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
142 nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
143 nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
144 nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
145 nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
146 nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
147 nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
148 nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
149 nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
150 nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
151 nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
152 nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
153 nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
154 nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
155 nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
156 nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
157 nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
158 nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
159 nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
160 nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
161 nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
162 nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
163 nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
164 nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
165 nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
166 nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
167 nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
168 nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
169 nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
170 nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
171 nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
172 nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
173 nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
174 nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
175 nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
176 nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
177 nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
178 nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
179 nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
180 nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
181 nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
182 nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
183 nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
184 nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
185 nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
186 nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
187 nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
188 nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
189 nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
190 nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
191 nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
192 nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
193 nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
194 nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
195 nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
196 nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
197 nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
198 nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
199 nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
200 nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
201 nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
202 nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
203 nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
204 nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
205 nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
206 nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
207 nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
208 nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
209 nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
210 nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
211 nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
212 nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
213 nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
214 nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
215 nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
216 nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
217 nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
218 nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
219 nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
220 nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
221 nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
222 nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
223 nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
224 nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
225 nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
226 nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
227 nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
228 nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
229 nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
230 nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
231 nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
232 nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
233 nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
234 nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
235 nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
236 nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
237 nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
238 nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
239 nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
240 nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
241 nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
242 nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
243 nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
244 nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
245 nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
246 nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
247 nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
248 nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
249 nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
250 nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
251 nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
252 nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
253 nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
254 nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
255 nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
256 nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
257 nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
258 nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
259 nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
260 nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
261 nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
262 nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
263 nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
264 nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
265 nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
266 nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
267 nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
268 nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
269 nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
270 nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
271 nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
272 nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
273 nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
274 nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
275 nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
276 nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
277 nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
278 nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
279 nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
280 nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
281 nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
282 nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
283 nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
284 nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
285 nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
286 nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
287 nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
288 nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
289 nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
290 nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
291 nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
292 nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
293 nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
294 nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
295 nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
296 nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
297 nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
298 nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
299 nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
300 nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
301 nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
302 nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
303 nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
304 nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
305 nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
306 nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
307 nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
308 nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
309 nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
310 nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
311 nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
312 nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
313 nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
314 nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
315 nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
316 nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
317 nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
318 nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
319 nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
320 nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
321 nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
322 nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
323 nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
324 nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
325 nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
326 nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
327 nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
328 nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
329 nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
330 nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
331 nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
332 nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
333 nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
334 nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
335 nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
336 nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
337 nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
338 nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
339 nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
340 nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
341 nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
342 nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
343 nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
344 nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
345 nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
346 nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
347 nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
348 nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
349 nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
350 nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
351 nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
352 nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
353 nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
354 nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
355 nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
356 nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
357 nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
358 nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
359 nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
360 nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
361 nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
362 nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
363 nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
364 nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
365 nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
366 nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
367 nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
368 nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
369 nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
370 nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
371 nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
372 nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
373 nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
374 nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
375 nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
376 nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
377 nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
378 nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
379 nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
380 nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
381 nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
382 nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
383 nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
384 nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
385 nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
386 nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
387 nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
388 nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
389 nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
390 nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
391 nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
392 nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
393 nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
394 nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
395 nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
396 nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
397 nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
398 nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
399 nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
400 nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
401 nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
402 nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
403 nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
404 nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
405 nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
406 nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
407 nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
408 nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
409 nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
410 nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
411 nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
412 nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
413 nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
414 nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
415 nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
416 nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
417 nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
418 nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
419 nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
420 nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
421 nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
422 nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
423 nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
424 nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
425 nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
426 nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
427 nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
428 nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
429 nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
430 nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
431 nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
432 nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
433 nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
434 nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
435 nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
436 nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
437 nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
438 nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
439 nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
440 nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
441 nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
442 nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
443 nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
444 nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
445 nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
446 nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
447 nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
448 nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
449 nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
450 nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
451 nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
452 nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
453 nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
454 nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
455 nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
456 nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
457 nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
458 nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
459 nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
460 nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
461 nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
462 nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
463 nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
464 nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
465 nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
466 nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
467 nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
468 nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
469 nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
470 nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
471 nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
472 nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
473 nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
474 nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
475 nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
476 nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
477 nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
478 nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
479 nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
480 nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
481 nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
482 nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
483 nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
484 nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
485 nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
486 nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
487 nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
488 nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
489 nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
490 nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
491 nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
492 nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
493 nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
494 nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
495 nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
496 nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
497 nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
498 nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
499 nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
500 nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
501 nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
502 nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
503 nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
504 nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
505 nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
506 nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
507 nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
508 nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
509 nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
510 nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
511 nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
512 nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
513 nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
514 nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
515 nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
516 nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
517 nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
518 nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
519 nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
520 nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
521 nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
522 nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
523 nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
524 nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
525 nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
526 nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
527 nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
528 nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
529 nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
530 nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
531 nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
532 nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
533 nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
534 nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
535 nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
536 nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
537 nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
538 nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
539 nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
540 nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
541 nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
542 nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
543 nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
544 nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
545 nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
546 nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
547 nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
548 nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
549 nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
550 nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
551 nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
552 nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
553 nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
554 nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
555 nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
556 nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
557 nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
558 nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
559 nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
560 nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
561 nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
562 nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
563 nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
564 nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
565 nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
566 nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
567 nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
568 nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
569 nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
570 nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
571 nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
572 nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
573 nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
574 nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
575 nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
576 nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
577 nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
578 nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
579 nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
580 nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
581 nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
582 nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
583 nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
584 nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
585 nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
586 nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
587 nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
588 nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
589 nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
590 nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
591 nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
592 nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
593 nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
594 nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
595 nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
596 nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
597 nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
598 nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
599 nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
600 nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
601 nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
602 nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
603 nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
604 nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
605 nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
606 nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
607 nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
608 nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
609 nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
610 nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
611 nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
612 nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
613 nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
614 nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
615 nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
616 nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
617 nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
618 nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
619 nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
620 nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
621 nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
622 nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
623 nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
624 nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
625 nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
626 nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
627 nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
628 nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
629 nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
630 nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
631 nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
632 nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
633 nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
634 nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
635 nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
636 nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
637 nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
638 nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
639 nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
640 nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
641 nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
642 nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
643 nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
644 nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
645 nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
646 nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
647 nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
648 nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
649 nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
650 nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
651 nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
652 nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
653 nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
654 nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
655 nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
656 nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
657 nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
658 nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
659 nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
660 nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
661 nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
662 nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
663 nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
664 nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
665 nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
666 nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
667 nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
668 nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
669 nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
670 nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
671 nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
672 nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
673 nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
674 nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
675 nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
676 nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
677 nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
678 nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
679 nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
680 nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
681 nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
682 nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
683 nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
684 nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
685 nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
686 nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
687 nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
688 nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
689 nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
690 nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
691 nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
692 nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
693 nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
694 nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
695 nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
696 nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
697 nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
698 nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
699 nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
700 nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
701 nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
702 nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
703 nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
704 nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
705 nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
706 nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
707 nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
708 nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
709 nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
710 nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
711 nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
712 nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
713 nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
714 nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
715 nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
716 nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
717 nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
718 nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
719 nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
720 nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
721 nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
722 nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
723 nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
724 nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
725 nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
726 nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
727 nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
728 nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
729 nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
730 nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
731 nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
732 nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
733 nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
734 nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
735 nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
736 nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
737 nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
738 nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
739 nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
740 nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
741 nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
742 nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
743 nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
744 nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
745 nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
746 nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
747 nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
748 nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
749 nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
750 nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
751 nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
752 nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
753 nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
754 nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
755 nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
756 nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
757 nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
758 nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
759 nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
760 nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
761 nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
762 nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
763 nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
764 nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
765 nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
766 nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
767 nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
768 nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
769 nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
770 nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
771 nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
772 nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
773 nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
774 nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
775 nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
776 nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
777 nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
778 nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
779 nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
780 nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
781 nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
782 nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
783 nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
784 nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
785 nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
786 nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
787 nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
788 nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
789 nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
790 nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
791 nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
792 nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
793 nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
794 nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
795 nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
796 nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
797 nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
798 nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
799 nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
800 nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
801 nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
802 nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
803 nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
804 nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
805 nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
806 nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
807 nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
808 nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
809 nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
810 nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
811 nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
812 nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
813 nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
814 nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
815 nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
816 nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
817 nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
818 nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
819 nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
820 nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
821 nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
822 nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
823 nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
824 nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
825 nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
826 nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
827 nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
828 nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
829 nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
830 if (fermi == 0x9097) {
831 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
832 nv_mthd(dev, 0x9097, mthd, 0x00000000);
833 }
834 nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
835 nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
836 nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
837 nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
838 nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
839 nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
840 nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
841 nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
842 nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
843 nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
844 nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
845 nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
846 nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
847 nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
848 nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
849 nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
850 nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
851 nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
852 nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
853 nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
854 nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
855 nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
856 nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
857 nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
858 nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
859 nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
860 nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
861 nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
862 nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
863 nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
864 nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
865 nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
866 nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
867 nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
868 nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
869 nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
870 nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
871 nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
872 nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
873 nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
874 nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
875 nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
876 nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
877 nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
878 nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
879 nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
880 nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
881 nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
882 nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
883 nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
884 nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
885 nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
886 nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
887 nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
888 nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
889 nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
890 nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
891 nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
892 nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
893 nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
894 nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
895 nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
896 nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
897 nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
898 nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
899 nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
900 nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
901 nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
902 nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
903 nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
904 nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
905 nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
906 nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
907 nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
908 nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
909 nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
910 nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
911 nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
912 nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
913 nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
914 nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
915 nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
916 nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
917 nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
918 nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
919 nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
920 nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
921 nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
922 nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
923 nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
924 nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
925 nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
926 nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
927 nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
928 nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
929 nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
930 nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
931 nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
932 nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
933 nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
934 nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
935 nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
936 nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
937 nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
938 nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
939 nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
940 nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
941 nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
942 nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
943 nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
944 nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
945 nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
946 nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
947 nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
948 nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
949 nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
950 nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
951 nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
952 nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
953 nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
954 nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
955 nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
956 nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
957 nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
958 nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
959 nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
960 nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
961 nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
962 nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
963 nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
964 nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
965 nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
966 nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
967 nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
968 nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
969 nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
970 nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
971 nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
972 nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
973 nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
974 nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
975 nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
976 nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
977 nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
978 nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
979 nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
980 nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
981 nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
982 nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
983 nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
984 nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
985 nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
986 nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
987 nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
988 nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
989 nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
990 nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
991 nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
992 nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
993 nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
994 nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
995 nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
996 nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
997 nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
998 nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
999 nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
1000 nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
1001 nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
1002 nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
1003 nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
1004 nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
1005 nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
1006 nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
1007 nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
1008 nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
1009 nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
1010 nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
1011 nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
1012 nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
1013 nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
1014 nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
1015 nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
1016 nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
1017 nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
1018 nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
1019 nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
1020 nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
1021 nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
1022 nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
1023 nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
1024 nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
1025 nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
1026 nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
1027 nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
1028 nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
1029 nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
1030 nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
1031 nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
1032 nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
1033 nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
1034 nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
1035 nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
1036 nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
1037 nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
1038 nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
1039 nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
1040 nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
1041 nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
1042 nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
1043 nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
1044 nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
1045 nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
1046 nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
1047 nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
1048 nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
1049 nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
1050 nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
1051 nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
1052 nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
1053 nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
1054 nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
1055 nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
1056 nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
1057 nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
1058 nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
1059 nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
1060 nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
1061 nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
1062 nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
1063 nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
1064 nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
1065 nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
1066 nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
1067 nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
1068 nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
1069 nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
1070 nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
1071 nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
1072 nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
1073 nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
1074 nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
1075 nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
1076 nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
1077 nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
1078 nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
1079 nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
1080 nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
1081 nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
1082 nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
1083 nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
1084 nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
1085 nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
1086 nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
1087 nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
1088 nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
1089 nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
1090 nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
1091 nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
1092 nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
1093 nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
1094 nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
1095 nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
1096 nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
1097 nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
1098 nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
1099 nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
1100 nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
1101 nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
1102 nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
1103 nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
1104 nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
1105 nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
1106 nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
1107 nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
1108 nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
1109 nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
1110 nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
1111 nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
1112 nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
1113 nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
1114 nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
1115 nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
1116 nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
1117 nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
1118 nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
1119 nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
1120 nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
1121 nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
1122 nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
1123 nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
1124 nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
1125 nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
1126 nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
1127 nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
1128 nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
1129 nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
1130 nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
1131 nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
1132 nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
1133 nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
1134 nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
1135 nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
1136 nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
1137 nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
1138 nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
1139 nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
1140 nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
1141 nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
1142 nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
1143 nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
1144 nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
1145 nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
1146 nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
1147 nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
1148 nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
1149 nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
1150 nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
1151 nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
1152 nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
1153 nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
1154 nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
1155 nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
1156 nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
1157 nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
1158 nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
1159 nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
1160 nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
1161 nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
1162 nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
1163 nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
1164 nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
1165 nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
1166 nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
1167 nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
1168 nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
1169 nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
1170 nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
1171 nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
1172 nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
1173 nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
1174 nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
1175 nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
1176 nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
1177 nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
1178 nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
1179 nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
1180 nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
1181 nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
1182 nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
1183 nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
1184 nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
1185 nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
1186 nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
1187 nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
1188 nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
1189 nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
1190 nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
1191 nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
1192 nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
1193 nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
1194 nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
1195 nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
1196 nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
1197 nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
1198 /* in trace, right after 0x90c0, not here */
1199 nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
1200}
1201
1202static void
1203nvc0_grctx_generate_9197(struct drm_device *dev)
1204{
1205 u32 fermi = nvc0_graph_class(dev);
1206 u32 mthd;
1207
1208 if (fermi == 0x9197) {
1209 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
1210 nv_mthd(dev, 0x9197, mthd, 0x00000000);
1211 }
1212 nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
1213}
1214
1215static void
1216nvc0_grctx_generate_9297(struct drm_device *dev)
1217{
1218 u32 fermi = nvc0_graph_class(dev);
1219 u32 mthd;
1220
1221 if (fermi == 0x9297) {
1222 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
1223 nv_mthd(dev, 0x9297, mthd, 0x00000000);
1224 }
1225 nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
1226 nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
1227 nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
1228 nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
1229 nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
1230 nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
1231}
1232
1233static void
1234nvc0_grctx_generate_902d(struct drm_device *dev)
1235{
1236 nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
1237 nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
1238 nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
1239 nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
1240 nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
1241 nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
1242 nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
1243 nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
1244 nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
1245 nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
1246 nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
1247 nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
1248 nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
1249 nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
1250 nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
1251 nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
1252 nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
1253}
1254
1255static void
1256nvc0_grctx_generate_9039(struct drm_device *dev)
1257{
1258 nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
1259 nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
1260 nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
1261 nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
1262 nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
1263 nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
1264 nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
1265 nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
1266}
1267
1268static void
1269nvc0_grctx_generate_90c0(struct drm_device *dev)
1270{
1271 struct drm_nouveau_private *dev_priv = dev->dev_private;
1272 int i;
1273
1274 for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
1275 nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
1276 nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
1277 nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
1278 nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
1279 nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
1280 nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
1281 }
1282 nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
1283 nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
1284 nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
1285 nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
1286 nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
1287 nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
1288 nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
1289 nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
1290 for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
1291 nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
1292 nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
1293 nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
1294 nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
1295 }
1296 nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
1297 nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
1298 nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
1299 nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
1300 nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
1301 nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
1302 nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
1303 nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
1304 nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
1305 nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
1306 nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
1307 nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
1308 nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
1309 nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
1310 nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
1311 nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
1312 nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
1313 nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
1314 nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
1315}
1316
1317static void
1318nvc0_grctx_generate_dispatch(struct drm_device *dev)
1319{
1320 int i;
1321
1322 nv_wr32(dev, 0x404004, 0x00000000);
1323 nv_wr32(dev, 0x404008, 0x00000000);
1324 nv_wr32(dev, 0x40400c, 0x00000000);
1325 nv_wr32(dev, 0x404010, 0x00000000);
1326 nv_wr32(dev, 0x404014, 0x00000000);
1327 nv_wr32(dev, 0x404018, 0x00000000);
1328 nv_wr32(dev, 0x40401c, 0x00000000);
1329 nv_wr32(dev, 0x404020, 0x00000000);
1330 nv_wr32(dev, 0x404024, 0x00000000);
1331 nv_wr32(dev, 0x404028, 0x00000000);
1332 nv_wr32(dev, 0x40402c, 0x00000000);
1333 nv_wr32(dev, 0x404044, 0x00000000);
1334 nv_wr32(dev, 0x404094, 0x00000000);
1335 nv_wr32(dev, 0x404098, 0x00000000);
1336 nv_wr32(dev, 0x40409c, 0x00000000);
1337 nv_wr32(dev, 0x4040a0, 0x00000000);
1338 nv_wr32(dev, 0x4040a4, 0x00000000);
1339 nv_wr32(dev, 0x4040a8, 0x00000000);
1340 nv_wr32(dev, 0x4040ac, 0x00000000);
1341 nv_wr32(dev, 0x4040b0, 0x00000000);
1342 nv_wr32(dev, 0x4040b4, 0x00000000);
1343 nv_wr32(dev, 0x4040b8, 0x00000000);
1344 nv_wr32(dev, 0x4040bc, 0x00000000);
1345 nv_wr32(dev, 0x4040c0, 0x00000000);
1346 nv_wr32(dev, 0x4040c4, 0x00000000);
1347 nv_wr32(dev, 0x4040c8, 0xf0000087);
1348 nv_wr32(dev, 0x4040d4, 0x00000000);
1349 nv_wr32(dev, 0x4040d8, 0x00000000);
1350 nv_wr32(dev, 0x4040dc, 0x00000000);
1351 nv_wr32(dev, 0x4040e0, 0x00000000);
1352 nv_wr32(dev, 0x4040e4, 0x00000000);
1353 nv_wr32(dev, 0x4040e8, 0x00001000);
1354 nv_wr32(dev, 0x4040f8, 0x00000000);
1355 nv_wr32(dev, 0x404130, 0x00000000);
1356 nv_wr32(dev, 0x404134, 0x00000000);
1357 nv_wr32(dev, 0x404138, 0x20000040);
1358 nv_wr32(dev, 0x404150, 0x0000002e);
1359 nv_wr32(dev, 0x404154, 0x00000400);
1360 nv_wr32(dev, 0x404158, 0x00000200);
1361 nv_wr32(dev, 0x404164, 0x00000055);
1362 nv_wr32(dev, 0x404168, 0x00000000);
1363 nv_wr32(dev, 0x404174, 0x00000000);
1364 nv_wr32(dev, 0x404178, 0x00000000);
1365 nv_wr32(dev, 0x40417c, 0x00000000);
1366 for (i = 0; i < 8; i++)
1367 nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
1368}
1369
1370static void
1371nvc0_grctx_generate_macro(struct drm_device *dev)
1372{
1373 nv_wr32(dev, 0x404404, 0x00000000);
1374 nv_wr32(dev, 0x404408, 0x00000000);
1375 nv_wr32(dev, 0x40440c, 0x00000000);
1376 nv_wr32(dev, 0x404410, 0x00000000);
1377 nv_wr32(dev, 0x404414, 0x00000000);
1378 nv_wr32(dev, 0x404418, 0x00000000);
1379 nv_wr32(dev, 0x40441c, 0x00000000);
1380 nv_wr32(dev, 0x404420, 0x00000000);
1381 nv_wr32(dev, 0x404424, 0x00000000);
1382 nv_wr32(dev, 0x404428, 0x00000000);
1383 nv_wr32(dev, 0x40442c, 0x00000000);
1384 nv_wr32(dev, 0x404430, 0x00000000);
1385 nv_wr32(dev, 0x404434, 0x00000000);
1386 nv_wr32(dev, 0x404438, 0x00000000);
1387 nv_wr32(dev, 0x404460, 0x00000000);
1388 nv_wr32(dev, 0x404464, 0x00000000);
1389 nv_wr32(dev, 0x404468, 0x00ffffff);
1390 nv_wr32(dev, 0x40446c, 0x00000000);
1391 nv_wr32(dev, 0x404480, 0x00000001);
1392 nv_wr32(dev, 0x404498, 0x00000001);
1393}
1394
1395static void
1396nvc0_grctx_generate_m2mf(struct drm_device *dev)
1397{
1398 nv_wr32(dev, 0x404604, 0x00000015);
1399 nv_wr32(dev, 0x404608, 0x00000000);
1400 nv_wr32(dev, 0x40460c, 0x00002e00);
1401 nv_wr32(dev, 0x404610, 0x00000100);
1402 nv_wr32(dev, 0x404618, 0x00000000);
1403 nv_wr32(dev, 0x40461c, 0x00000000);
1404 nv_wr32(dev, 0x404620, 0x00000000);
1405 nv_wr32(dev, 0x404624, 0x00000000);
1406 nv_wr32(dev, 0x404628, 0x00000000);
1407 nv_wr32(dev, 0x40462c, 0x00000000);
1408 nv_wr32(dev, 0x404630, 0x00000000);
1409 nv_wr32(dev, 0x404634, 0x00000000);
1410 nv_wr32(dev, 0x404638, 0x00000004);
1411 nv_wr32(dev, 0x40463c, 0x00000000);
1412 nv_wr32(dev, 0x404640, 0x00000000);
1413 nv_wr32(dev, 0x404644, 0x00000000);
1414 nv_wr32(dev, 0x404648, 0x00000000);
1415 nv_wr32(dev, 0x40464c, 0x00000000);
1416 nv_wr32(dev, 0x404650, 0x00000000);
1417 nv_wr32(dev, 0x404654, 0x00000000);
1418 nv_wr32(dev, 0x404658, 0x00000000);
1419 nv_wr32(dev, 0x40465c, 0x007f0100);
1420 nv_wr32(dev, 0x404660, 0x00000000);
1421 nv_wr32(dev, 0x404664, 0x00000000);
1422 nv_wr32(dev, 0x404668, 0x00000000);
1423 nv_wr32(dev, 0x40466c, 0x00000000);
1424 nv_wr32(dev, 0x404670, 0x00000000);
1425 nv_wr32(dev, 0x404674, 0x00000000);
1426 nv_wr32(dev, 0x404678, 0x00000000);
1427 nv_wr32(dev, 0x40467c, 0x00000002);
1428 nv_wr32(dev, 0x404680, 0x00000000);
1429 nv_wr32(dev, 0x404684, 0x00000000);
1430 nv_wr32(dev, 0x404688, 0x00000000);
1431 nv_wr32(dev, 0x40468c, 0x00000000);
1432 nv_wr32(dev, 0x404690, 0x00000000);
1433 nv_wr32(dev, 0x404694, 0x00000000);
1434 nv_wr32(dev, 0x404698, 0x00000000);
1435 nv_wr32(dev, 0x40469c, 0x00000000);
1436 nv_wr32(dev, 0x4046a0, 0x007f0080);
1437 nv_wr32(dev, 0x4046a4, 0x00000000);
1438 nv_wr32(dev, 0x4046a8, 0x00000000);
1439 nv_wr32(dev, 0x4046ac, 0x00000000);
1440 nv_wr32(dev, 0x4046b0, 0x00000000);
1441 nv_wr32(dev, 0x4046b4, 0x00000000);
1442 nv_wr32(dev, 0x4046b8, 0x00000000);
1443 nv_wr32(dev, 0x4046bc, 0x00000000);
1444 nv_wr32(dev, 0x4046c0, 0x00000000);
1445 nv_wr32(dev, 0x4046c4, 0x00000000);
1446 nv_wr32(dev, 0x4046c8, 0x00000000);
1447 nv_wr32(dev, 0x4046cc, 0x00000000);
1448 nv_wr32(dev, 0x4046d0, 0x00000000);
1449 nv_wr32(dev, 0x4046d4, 0x00000000);
1450 nv_wr32(dev, 0x4046d8, 0x00000000);
1451 nv_wr32(dev, 0x4046dc, 0x00000000);
1452 nv_wr32(dev, 0x4046e0, 0x00000000);
1453 nv_wr32(dev, 0x4046e4, 0x00000000);
1454 nv_wr32(dev, 0x4046e8, 0x00000000);
1455 nv_wr32(dev, 0x4046f0, 0x00000000);
1456 nv_wr32(dev, 0x4046f4, 0x00000000);
1457}
1458
1459static void
1460nvc0_grctx_generate_unk47xx(struct drm_device *dev)
1461{
1462 nv_wr32(dev, 0x404700, 0x00000000);
1463 nv_wr32(dev, 0x404704, 0x00000000);
1464 nv_wr32(dev, 0x404708, 0x00000000);
1465 nv_wr32(dev, 0x40470c, 0x00000000);
1466 nv_wr32(dev, 0x404710, 0x00000000);
1467 nv_wr32(dev, 0x404714, 0x00000000);
1468 nv_wr32(dev, 0x404718, 0x00000000);
1469 nv_wr32(dev, 0x40471c, 0x00000000);
1470 nv_wr32(dev, 0x404720, 0x00000000);
1471 nv_wr32(dev, 0x404724, 0x00000000);
1472 nv_wr32(dev, 0x404728, 0x00000000);
1473 nv_wr32(dev, 0x40472c, 0x00000000);
1474 nv_wr32(dev, 0x404730, 0x00000000);
1475 nv_wr32(dev, 0x404734, 0x00000100);
1476 nv_wr32(dev, 0x404738, 0x00000000);
1477 nv_wr32(dev, 0x40473c, 0x00000000);
1478 nv_wr32(dev, 0x404740, 0x00000000);
1479 nv_wr32(dev, 0x404744, 0x00000000);
1480 nv_wr32(dev, 0x404748, 0x00000000);
1481 nv_wr32(dev, 0x40474c, 0x00000000);
1482 nv_wr32(dev, 0x404750, 0x00000000);
1483 nv_wr32(dev, 0x404754, 0x00000000);
1484}
1485
1486static void
1487nvc0_grctx_generate_shaders(struct drm_device *dev)
1488{
1489 struct drm_nouveau_private *dev_priv = dev->dev_private;
1490
1491 if (dev_priv->chipset == 0xd9) {
1492 nv_wr32(dev, 0x405800, 0x0f8000bf);
1493 nv_wr32(dev, 0x405830, 0x02180218);
1494 nv_wr32(dev, 0x405834, 0x08000000);
1495 } else
1496 if (dev_priv->chipset == 0xc1) {
1497 nv_wr32(dev, 0x405800, 0x0f8000bf);
1498 nv_wr32(dev, 0x405830, 0x02180218);
1499 nv_wr32(dev, 0x405834, 0x00000000);
1500 } else {
1501 nv_wr32(dev, 0x405800, 0x078000bf);
1502 nv_wr32(dev, 0x405830, 0x02180000);
1503 nv_wr32(dev, 0x405834, 0x00000000);
1504 }
1505 nv_wr32(dev, 0x405838, 0x00000000);
1506 nv_wr32(dev, 0x405854, 0x00000000);
1507 nv_wr32(dev, 0x405870, 0x00000001);
1508 nv_wr32(dev, 0x405874, 0x00000001);
1509 nv_wr32(dev, 0x405878, 0x00000001);
1510 nv_wr32(dev, 0x40587c, 0x00000001);
1511 nv_wr32(dev, 0x405a00, 0x00000000);
1512 nv_wr32(dev, 0x405a04, 0x00000000);
1513 nv_wr32(dev, 0x405a18, 0x00000000);
1514}
1515
1516static void
1517nvc0_grctx_generate_unk60xx(struct drm_device *dev)
1518{
1519 nv_wr32(dev, 0x406020, 0x000103c1);
1520 nv_wr32(dev, 0x406028, 0x00000001);
1521 nv_wr32(dev, 0x40602c, 0x00000001);
1522 nv_wr32(dev, 0x406030, 0x00000001);
1523 nv_wr32(dev, 0x406034, 0x00000001);
1524}
1525
1526static void
1527nvc0_grctx_generate_unk64xx(struct drm_device *dev)
1528{
1529 struct drm_nouveau_private *dev_priv = dev->dev_private;
1530
1531 nv_wr32(dev, 0x4064a8, 0x00000000);
1532 nv_wr32(dev, 0x4064ac, 0x00003fff);
1533 nv_wr32(dev, 0x4064b4, 0x00000000);
1534 nv_wr32(dev, 0x4064b8, 0x00000000);
1535 if (dev_priv->chipset == 0xd9)
1536 nv_wr32(dev, 0x4064bc, 0x00000000);
1537 if (dev_priv->chipset == 0xc1 ||
1538 dev_priv->chipset == 0xd9) {
1539 nv_wr32(dev, 0x4064c0, 0x80140078);
1540 nv_wr32(dev, 0x4064c4, 0x0086ffff);
1541 }
1542}
1543
1544static void
1545nvc0_grctx_generate_tpbus(struct drm_device *dev)
1546{
1547 nv_wr32(dev, 0x407804, 0x00000023);
1548 nv_wr32(dev, 0x40780c, 0x0a418820);
1549 nv_wr32(dev, 0x407810, 0x062080e6);
1550 nv_wr32(dev, 0x407814, 0x020398a4);
1551 nv_wr32(dev, 0x407818, 0x0e629062);
1552 nv_wr32(dev, 0x40781c, 0x0a418820);
1553 nv_wr32(dev, 0x407820, 0x000000e6);
1554 nv_wr32(dev, 0x4078bc, 0x00000103);
1555}
1556
1557static void
1558nvc0_grctx_generate_ccache(struct drm_device *dev)
1559{
1560 nv_wr32(dev, 0x408000, 0x00000000);
1561 nv_wr32(dev, 0x408004, 0x00000000);
1562 nv_wr32(dev, 0x408008, 0x00000018);
1563 nv_wr32(dev, 0x40800c, 0x00000000);
1564 nv_wr32(dev, 0x408010, 0x00000000);
1565 nv_wr32(dev, 0x408014, 0x00000069);
1566 nv_wr32(dev, 0x408018, 0xe100e100);
1567 nv_wr32(dev, 0x408064, 0x00000000);
1568}
1569
1570static void
1571nvc0_grctx_generate_rop(struct drm_device *dev)
1572{
1573 struct drm_nouveau_private *dev_priv = dev->dev_private;
1574 int chipset = dev_priv->chipset;
1575
1576 /* ROPC_BROADCAST */
1577 nv_wr32(dev, 0x408800, 0x02802a3c);
1578 nv_wr32(dev, 0x408804, 0x00000040);
1579 if (chipset == 0xd9) {
1580 nv_wr32(dev, 0x408808, 0x1043e005);
1581 nv_wr32(dev, 0x408900, 0x3080b801);
1582 nv_wr32(dev, 0x408904, 0x1043e005);
1583 nv_wr32(dev, 0x408908, 0x00c8102f);
1584 } else
1585 if (chipset == 0xc1) {
1586 nv_wr32(dev, 0x408808, 0x1003e005);
1587 nv_wr32(dev, 0x408900, 0x3080b801);
1588 nv_wr32(dev, 0x408904, 0x62000001);
1589 nv_wr32(dev, 0x408908, 0x00c80929);
1590 } else {
1591 nv_wr32(dev, 0x408808, 0x0003e00d);
1592 nv_wr32(dev, 0x408900, 0x3080b801);
1593 nv_wr32(dev, 0x408904, 0x02000001);
1594 nv_wr32(dev, 0x408908, 0x00c80929);
1595 }
1596 nv_wr32(dev, 0x40890c, 0x00000000);
1597 nv_wr32(dev, 0x408980, 0x0000011d);
1598}
1599
/* Write the initial (context-reset) values for the GPC_BROADCAST
 * register range.  Several values vary per chipset (0xc1 and 0xd9 are
 * special-cased); the write order of this captured sequence is
 * preserved as-is.  Register meanings are largely undocumented —
 * NOTE(review): values presumably mirror the state the binary driver
 * programs; confirm against envytools register docs before changing. */
static void
nvc0_grctx_generate_gpc(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chipset = dev_priv->chipset;
	int i;

	/* GPC_BROADCAST */
	nv_wr32(dev, 0x418380, 0x00000016);
	nv_wr32(dev, 0x418400, 0x38004e00);
	nv_wr32(dev, 0x418404, 0x71e0ffff);
	nv_wr32(dev, 0x418408, 0x00000000);
	nv_wr32(dev, 0x41840c, 0x00001008);
	nv_wr32(dev, 0x418410, 0x0fff0fff);
	/* 0xd9 differs only in one extra high bit here */
	nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
	nv_wr32(dev, 0x418450, 0x00000000);
	nv_wr32(dev, 0x418454, 0x00000000);
	nv_wr32(dev, 0x418458, 0x00000000);
	nv_wr32(dev, 0x41845c, 0x00000000);
	nv_wr32(dev, 0x418460, 0x00000000);
	nv_wr32(dev, 0x418464, 0x00000000);
	nv_wr32(dev, 0x418468, 0x00000001);
	nv_wr32(dev, 0x41846c, 0x00000000);
	nv_wr32(dev, 0x418470, 0x00000000);
	nv_wr32(dev, 0x418600, 0x0000001f);
	nv_wr32(dev, 0x418684, 0x0000000f);
	nv_wr32(dev, 0x418700, 0x00000002);
	nv_wr32(dev, 0x418704, 0x00000080);
	nv_wr32(dev, 0x418708, 0x00000000);
	nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
	nv_wr32(dev, 0x418710, 0x00000000);
	nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
	nv_wr32(dev, 0x418808, 0x00000000);
	nv_wr32(dev, 0x41880c, 0x00000000);
	nv_wr32(dev, 0x418810, 0x00000000);
	nv_wr32(dev, 0x418828, 0x00008442);
	if (chipset == 0xc1 || chipset == 0xd9)
		nv_wr32(dev, 0x418830, 0x10000001);
	else
		nv_wr32(dev, 0x418830, 0x00000001);
	nv_wr32(dev, 0x4188d8, 0x00000008);
	nv_wr32(dev, 0x4188e0, 0x01000000);
	nv_wr32(dev, 0x4188e8, 0x00000000);
	nv_wr32(dev, 0x4188ec, 0x00000000);
	nv_wr32(dev, 0x4188f0, 0x00000000);
	nv_wr32(dev, 0x4188f4, 0x00000000);
	nv_wr32(dev, 0x4188f8, 0x00000000);
	/* three-way chipset split for 0x4188fc */
	if (chipset == 0xd9)
		nv_wr32(dev, 0x4188fc, 0x20100008);
	else if (chipset == 0xc1)
		nv_wr32(dev, 0x4188fc, 0x00100018);
	else
		nv_wr32(dev, 0x4188fc, 0x00100000);
	nv_wr32(dev, 0x41891c, 0x00ff00ff);
	nv_wr32(dev, 0x418924, 0x00000000);
	nv_wr32(dev, 0x418928, 0x00ffff00);
	nv_wr32(dev, 0x41892c, 0x0000ff00);
	/* eight identical 0x20-byte register groups at 0x418a00+ */
	for (i = 0; i < 8; i++) {
		nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
		nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
	}
	nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
	nv_wr32(dev, 0x418b08, 0x0a418820);
	nv_wr32(dev, 0x418b0c, 0x062080e6);
	nv_wr32(dev, 0x418b10, 0x020398a4);
	nv_wr32(dev, 0x418b14, 0x0e629062);
	nv_wr32(dev, 0x418b18, 0x0a418820);
	nv_wr32(dev, 0x418b1c, 0x000000e6);
	nv_wr32(dev, 0x418bb8, 0x00000103);
	nv_wr32(dev, 0x418c08, 0x00000001);
	nv_wr32(dev, 0x418c10, 0x00000000);
	nv_wr32(dev, 0x418c14, 0x00000000);
	nv_wr32(dev, 0x418c18, 0x00000000);
	nv_wr32(dev, 0x418c1c, 0x00000000);
	nv_wr32(dev, 0x418c20, 0x00000000);
	nv_wr32(dev, 0x418c24, 0x00000000);
	nv_wr32(dev, 0x418c28, 0x00000000);
	nv_wr32(dev, 0x418c2c, 0x00000000);
	/* this register is only written on 0xc1/0xd9 */
	if (chipset == 0xc1 || chipset == 0xd9)
		nv_wr32(dev, 0x418c6c, 0x00000001);
	nv_wr32(dev, 0x418c80, 0x20200004);
	nv_wr32(dev, 0x418c8c, 0x00000001);
	nv_wr32(dev, 0x419000, 0x00000780);
	nv_wr32(dev, 0x419004, 0x00000000);
	nv_wr32(dev, 0x419008, 0x00000000);
	nv_wr32(dev, 0x419014, 0x00000004);
}
1692
1693static void
1694nvc0_grctx_generate_tp(struct drm_device *dev)
1695{
1696 struct drm_nouveau_private *dev_priv = dev->dev_private;
1697 int chipset = dev_priv->chipset;
1698
1699 /* GPC_BROADCAST.TP_BROADCAST */
1700 nv_wr32(dev, 0x419818, 0x00000000);
1701 nv_wr32(dev, 0x41983c, 0x00038bc7);
1702 nv_wr32(dev, 0x419848, 0x00000000);
1703 if (chipset == 0xc1 || chipset == 0xd9)
1704 nv_wr32(dev, 0x419864, 0x00000129);
1705 else
1706 nv_wr32(dev, 0x419864, 0x0000012a);
1707 nv_wr32(dev, 0x419888, 0x00000000);
1708 nv_wr32(dev, 0x419a00, 0x000001f0);
1709 nv_wr32(dev, 0x419a04, 0x00000001);
1710 nv_wr32(dev, 0x419a08, 0x00000023);
1711 nv_wr32(dev, 0x419a0c, 0x00020000);
1712 nv_wr32(dev, 0x419a10, 0x00000000);
1713 nv_wr32(dev, 0x419a14, 0x00000200);
1714 nv_wr32(dev, 0x419a1c, 0x00000000);
1715 nv_wr32(dev, 0x419a20, 0x00000800);
1716 if (chipset == 0xd9)
1717 nv_wr32(dev, 0x00419ac4, 0x0017f440);
1718 else if (chipset != 0xc0 && chipset != 0xc8)
1719 nv_wr32(dev, 0x00419ac4, 0x0007f440);
1720 nv_wr32(dev, 0x419b00, 0x0a418820);
1721 nv_wr32(dev, 0x419b04, 0x062080e6);
1722 nv_wr32(dev, 0x419b08, 0x020398a4);
1723 nv_wr32(dev, 0x419b0c, 0x0e629062);
1724 nv_wr32(dev, 0x419b10, 0x0a418820);
1725 nv_wr32(dev, 0x419b14, 0x000000e6);
1726 nv_wr32(dev, 0x419bd0, 0x00900103);
1727 if (chipset == 0xc1 || chipset == 0xd9)
1728 nv_wr32(dev, 0x419be0, 0x00400001);
1729 else
1730 nv_wr32(dev, 0x419be0, 0x00000001);
1731 nv_wr32(dev, 0x419be4, 0x00000000);
1732 nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
1733 nv_wr32(dev, 0x419c04, 0x00000006);
1734 nv_wr32(dev, 0x419c08, 0x00000002);
1735 nv_wr32(dev, 0x419c20, 0x00000000);
1736 if (dev_priv->chipset == 0xd9) {
1737 nv_wr32(dev, 0x419c24, 0x00084210);
1738 nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
1739 nv_wr32(dev, 0x419cb0, 0x00020048);
1740 } else
1741 if (chipset == 0xce || chipset == 0xcf) {
1742 nv_wr32(dev, 0x419cb0, 0x00020048);
1743 } else {
1744 nv_wr32(dev, 0x419cb0, 0x00060048);
1745 }
1746 nv_wr32(dev, 0x419ce8, 0x00000000);
1747 nv_wr32(dev, 0x419cf4, 0x00000183);
1748 if (chipset == 0xc1 || chipset == 0xd9)
1749 nv_wr32(dev, 0x419d20, 0x12180000);
1750 else
1751 nv_wr32(dev, 0x419d20, 0x02180000);
1752 nv_wr32(dev, 0x419d24, 0x00001fff);
1753 if (chipset == 0xc1 || chipset == 0xd9)
1754 nv_wr32(dev, 0x419d44, 0x02180218);
1755 nv_wr32(dev, 0x419e04, 0x00000000);
1756 nv_wr32(dev, 0x419e08, 0x00000000);
1757 nv_wr32(dev, 0x419e0c, 0x00000000);
1758 nv_wr32(dev, 0x419e10, 0x00000002);
1759 nv_wr32(dev, 0x419e44, 0x001beff2);
1760 nv_wr32(dev, 0x419e48, 0x00000000);
1761 nv_wr32(dev, 0x419e4c, 0x0000000f);
1762 nv_wr32(dev, 0x419e50, 0x00000000);
1763 nv_wr32(dev, 0x419e54, 0x00000000);
1764 nv_wr32(dev, 0x419e58, 0x00000000);
1765 nv_wr32(dev, 0x419e5c, 0x00000000);
1766 nv_wr32(dev, 0x419e60, 0x00000000);
1767 nv_wr32(dev, 0x419e64, 0x00000000);
1768 nv_wr32(dev, 0x419e68, 0x00000000);
1769 nv_wr32(dev, 0x419e6c, 0x00000000);
1770 nv_wr32(dev, 0x419e70, 0x00000000);
1771 nv_wr32(dev, 0x419e74, 0x00000000);
1772 nv_wr32(dev, 0x419e78, 0x00000000);
1773 nv_wr32(dev, 0x419e7c, 0x00000000);
1774 nv_wr32(dev, 0x419e80, 0x00000000);
1775 nv_wr32(dev, 0x419e84, 0x00000000);
1776 nv_wr32(dev, 0x419e88, 0x00000000);
1777 nv_wr32(dev, 0x419e8c, 0x00000000);
1778 nv_wr32(dev, 0x419e90, 0x00000000);
1779 nv_wr32(dev, 0x419e98, 0x00000000);
1780 if (chipset != 0xc0 && chipset != 0xc8)
1781 nv_wr32(dev, 0x419ee0, 0x00011110);
1782 nv_wr32(dev, 0x419f50, 0x00000000);
1783 nv_wr32(dev, 0x419f54, 0x00000000);
1784 if (chipset != 0xc0 && chipset != 0xc8)
1785 nv_wr32(dev, 0x419f58, 0x00000000);
1786}
1787
1788int
1789nvc0_grctx_generate(struct nouveau_channel *chan)
1790{
1791 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
1792 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
1793 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
1794 struct drm_device *dev = chan->dev;
1795 int i, gpc, tp, id;
1796 u32 fermi = nvc0_graph_class(dev);
1797 u32 r000260, tmp;
1798
1799 r000260 = nv_rd32(dev, 0x000260);
1800 nv_wr32(dev, 0x000260, r000260 & ~1);
1801 nv_wr32(dev, 0x400208, 0x00000000);
1802
1803 nvc0_grctx_generate_dispatch(dev);
1804 nvc0_grctx_generate_macro(dev);
1805 nvc0_grctx_generate_m2mf(dev);
1806 nvc0_grctx_generate_unk47xx(dev);
1807 nvc0_grctx_generate_shaders(dev);
1808 nvc0_grctx_generate_unk60xx(dev);
1809 nvc0_grctx_generate_unk64xx(dev);
1810 nvc0_grctx_generate_tpbus(dev);
1811 nvc0_grctx_generate_ccache(dev);
1812 nvc0_grctx_generate_rop(dev);
1813 nvc0_grctx_generate_gpc(dev);
1814 nvc0_grctx_generate_tp(dev);
1815
1816 nv_wr32(dev, 0x404154, 0x00000000);
1817
1818 /* fuc "mmio list" writes */
1819 for (i = 0; i < grch->mmio_nr * 8; i += 8) {
1820 u32 reg = nv_ro32(grch->mmio, i + 0);
1821 nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
1822 }
1823
1824 for (tp = 0, id = 0; tp < 4; tp++) {
1825 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1826 if (tp < priv->tp_nr[gpc]) {
1827 nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
1828 nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
1829 nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
1830 nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
1831 id++;
1832 }
1833
1834 nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
1835 nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
1836 }
1837 }
1838
1839 tmp = 0;
1840 for (i = 0; i < priv->gpc_nr; i++)
1841 tmp |= priv->tp_nr[i] << (i * 4);
1842 nv_wr32(dev, 0x406028, tmp);
1843 nv_wr32(dev, 0x405870, tmp);
1844
1845 nv_wr32(dev, 0x40602c, 0x00000000);
1846 nv_wr32(dev, 0x405874, 0x00000000);
1847 nv_wr32(dev, 0x406030, 0x00000000);
1848 nv_wr32(dev, 0x405878, 0x00000000);
1849 nv_wr32(dev, 0x406034, 0x00000000);
1850 nv_wr32(dev, 0x40587c, 0x00000000);
1851
1852 if (1) {
1853 u8 tpnr[GPC_MAX], data[TP_MAX];
1854
1855 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1856 memset(data, 0x1f, sizeof(data));
1857
1858 gpc = -1;
1859 for (tp = 0; tp < priv->tp_total; tp++) {
1860 do {
1861 gpc = (gpc + 1) % priv->gpc_nr;
1862 } while (!tpnr[gpc]);
1863 tpnr[gpc]--;
1864 data[tp] = gpc;
1865 }
1866
1867 for (i = 0; i < 4; i++)
1868 nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
1869 }
1870
1871 if (1) {
1872 u32 data[6] = {}, data2[2] = {};
1873 u8 tpnr[GPC_MAX];
1874 u8 shift, ntpcv;
1875
1876 /* calculate first set of magics */
1877 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1878
1879 gpc = -1;
1880 for (tp = 0; tp < priv->tp_total; tp++) {
1881 do {
1882 gpc = (gpc + 1) % priv->gpc_nr;
1883 } while (!tpnr[gpc]);
1884 tpnr[gpc]--;
1885
1886 data[tp / 6] |= gpc << ((tp % 6) * 5);
1887 }
1888
1889 for (; tp < 32; tp++)
1890 data[tp / 6] |= 7 << ((tp % 6) * 5);
1891
1892 /* and the second... */
1893 shift = 0;
1894 ntpcv = priv->tp_total;
1895 while (!(ntpcv & (1 << 4))) {
1896 ntpcv <<= 1;
1897 shift++;
1898 }
1899
1900 data2[0] = (ntpcv << 16);
1901 data2[0] |= (shift << 21);
1902 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
1903 for (i = 1; i < 7; i++)
1904 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
1905
1906 /* GPC_BROADCAST */
1907 nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
1908 priv->magic_not_rop_nr);
1909 for (i = 0; i < 6; i++)
1910 nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
1911
1912 /* GPC_BROADCAST.TP_BROADCAST */
1913 nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
1914 priv->magic_not_rop_nr |
1915 data2[0]);
1916 nv_wr32(dev, 0x419be4, data2[1]);
1917 for (i = 0; i < 6; i++)
1918 nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
1919
1920 /* UNK78xx */
1921 nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
1922 priv->magic_not_rop_nr);
1923 for (i = 0; i < 6; i++)
1924 nv_wr32(dev, 0x40780c + (i * 4), data[i]);
1925 }
1926
1927 if (1) {
1928 u32 tp_mask = 0, tp_set = 0;
1929 u8 tpnr[GPC_MAX], a, b;
1930
1931 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1932 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
1933 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
1934
1935 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
1936 a = (i * (priv->tp_total - 1)) / 32;
1937 if (a != b) {
1938 b = a;
1939 do {
1940 gpc = (gpc + 1) % priv->gpc_nr;
1941 } while (!tpnr[gpc]);
1942 tp = priv->tp_nr[gpc] - tpnr[gpc]--;
1943
1944 tp_set |= 1 << ((gpc * 8) + tp);
1945 }
1946
1947 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
1948 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
1949 }
1950 }
1951
1952 nv_wr32(dev, 0x400208, 0x80000000);
1953
1954 nv_icmd(dev, 0x00001000, 0x00000004);
1955 nv_icmd(dev, 0x000000a9, 0x0000ffff);
1956 nv_icmd(dev, 0x00000038, 0x0fac6881);
1957 nv_icmd(dev, 0x0000003d, 0x00000001);
1958 nv_icmd(dev, 0x000000e8, 0x00000400);
1959 nv_icmd(dev, 0x000000e9, 0x00000400);
1960 nv_icmd(dev, 0x000000ea, 0x00000400);
1961 nv_icmd(dev, 0x000000eb, 0x00000400);
1962 nv_icmd(dev, 0x000000ec, 0x00000400);
1963 nv_icmd(dev, 0x000000ed, 0x00000400);
1964 nv_icmd(dev, 0x000000ee, 0x00000400);
1965 nv_icmd(dev, 0x000000ef, 0x00000400);
1966 nv_icmd(dev, 0x00000078, 0x00000300);
1967 nv_icmd(dev, 0x00000079, 0x00000300);
1968 nv_icmd(dev, 0x0000007a, 0x00000300);
1969 nv_icmd(dev, 0x0000007b, 0x00000300);
1970 nv_icmd(dev, 0x0000007c, 0x00000300);
1971 nv_icmd(dev, 0x0000007d, 0x00000300);
1972 nv_icmd(dev, 0x0000007e, 0x00000300);
1973 nv_icmd(dev, 0x0000007f, 0x00000300);
1974 nv_icmd(dev, 0x00000050, 0x00000011);
1975 nv_icmd(dev, 0x00000058, 0x00000008);
1976 nv_icmd(dev, 0x00000059, 0x00000008);
1977 nv_icmd(dev, 0x0000005a, 0x00000008);
1978 nv_icmd(dev, 0x0000005b, 0x00000008);
1979 nv_icmd(dev, 0x0000005c, 0x00000008);
1980 nv_icmd(dev, 0x0000005d, 0x00000008);
1981 nv_icmd(dev, 0x0000005e, 0x00000008);
1982 nv_icmd(dev, 0x0000005f, 0x00000008);
1983 nv_icmd(dev, 0x00000208, 0x00000001);
1984 nv_icmd(dev, 0x00000209, 0x00000001);
1985 nv_icmd(dev, 0x0000020a, 0x00000001);
1986 nv_icmd(dev, 0x0000020b, 0x00000001);
1987 nv_icmd(dev, 0x0000020c, 0x00000001);
1988 nv_icmd(dev, 0x0000020d, 0x00000001);
1989 nv_icmd(dev, 0x0000020e, 0x00000001);
1990 nv_icmd(dev, 0x0000020f, 0x00000001);
1991 nv_icmd(dev, 0x00000081, 0x00000001);
1992 nv_icmd(dev, 0x00000085, 0x00000004);
1993 nv_icmd(dev, 0x00000088, 0x00000400);
1994 nv_icmd(dev, 0x00000090, 0x00000300);
1995 nv_icmd(dev, 0x00000098, 0x00001001);
1996 nv_icmd(dev, 0x000000e3, 0x00000001);
1997 nv_icmd(dev, 0x000000da, 0x00000001);
1998 nv_icmd(dev, 0x000000f8, 0x00000003);
1999 nv_icmd(dev, 0x000000fa, 0x00000001);
2000 nv_icmd(dev, 0x0000009f, 0x0000ffff);
2001 nv_icmd(dev, 0x000000a0, 0x0000ffff);
2002 nv_icmd(dev, 0x000000a1, 0x0000ffff);
2003 nv_icmd(dev, 0x000000a2, 0x0000ffff);
2004 nv_icmd(dev, 0x000000b1, 0x00000001);
2005 nv_icmd(dev, 0x000000b2, 0x00000000);
2006 nv_icmd(dev, 0x000000b3, 0x00000000);
2007 nv_icmd(dev, 0x000000b4, 0x00000000);
2008 nv_icmd(dev, 0x000000b5, 0x00000000);
2009 nv_icmd(dev, 0x000000b6, 0x00000000);
2010 nv_icmd(dev, 0x000000b7, 0x00000000);
2011 nv_icmd(dev, 0x000000b8, 0x00000000);
2012 nv_icmd(dev, 0x000000b9, 0x00000000);
2013 nv_icmd(dev, 0x000000ba, 0x00000000);
2014 nv_icmd(dev, 0x000000bb, 0x00000000);
2015 nv_icmd(dev, 0x000000bc, 0x00000000);
2016 nv_icmd(dev, 0x000000bd, 0x00000000);
2017 nv_icmd(dev, 0x000000be, 0x00000000);
2018 nv_icmd(dev, 0x000000bf, 0x00000000);
2019 nv_icmd(dev, 0x000000c0, 0x00000000);
2020 nv_icmd(dev, 0x000000c1, 0x00000000);
2021 nv_icmd(dev, 0x000000c2, 0x00000000);
2022 nv_icmd(dev, 0x000000c3, 0x00000000);
2023 nv_icmd(dev, 0x000000c4, 0x00000000);
2024 nv_icmd(dev, 0x000000c5, 0x00000000);
2025 nv_icmd(dev, 0x000000c6, 0x00000000);
2026 nv_icmd(dev, 0x000000c7, 0x00000000);
2027 nv_icmd(dev, 0x000000c8, 0x00000000);
2028 nv_icmd(dev, 0x000000c9, 0x00000000);
2029 nv_icmd(dev, 0x000000ca, 0x00000000);
2030 nv_icmd(dev, 0x000000cb, 0x00000000);
2031 nv_icmd(dev, 0x000000cc, 0x00000000);
2032 nv_icmd(dev, 0x000000cd, 0x00000000);
2033 nv_icmd(dev, 0x000000ce, 0x00000000);
2034 nv_icmd(dev, 0x000000cf, 0x00000000);
2035 nv_icmd(dev, 0x000000d0, 0x00000000);
2036 nv_icmd(dev, 0x000000d1, 0x00000000);
2037 nv_icmd(dev, 0x000000d2, 0x00000000);
2038 nv_icmd(dev, 0x000000d3, 0x00000000);
2039 nv_icmd(dev, 0x000000d4, 0x00000000);
2040 nv_icmd(dev, 0x000000d5, 0x00000000);
2041 nv_icmd(dev, 0x000000d6, 0x00000000);
2042 nv_icmd(dev, 0x000000d7, 0x00000000);
2043 nv_icmd(dev, 0x000000d8, 0x00000000);
2044 nv_icmd(dev, 0x000000d9, 0x00000000);
2045 nv_icmd(dev, 0x00000210, 0x00000040);
2046 nv_icmd(dev, 0x00000211, 0x00000040);
2047 nv_icmd(dev, 0x00000212, 0x00000040);
2048 nv_icmd(dev, 0x00000213, 0x00000040);
2049 nv_icmd(dev, 0x00000214, 0x00000040);
2050 nv_icmd(dev, 0x00000215, 0x00000040);
2051 nv_icmd(dev, 0x00000216, 0x00000040);
2052 nv_icmd(dev, 0x00000217, 0x00000040);
2053 if (dev_priv->chipset == 0xd9) {
2054 for (i = 0x0400; i <= 0x0417; i++)
2055 nv_icmd(dev, i, 0x00000040);
2056 }
2057 nv_icmd(dev, 0x00000218, 0x0000c080);
2058 nv_icmd(dev, 0x00000219, 0x0000c080);
2059 nv_icmd(dev, 0x0000021a, 0x0000c080);
2060 nv_icmd(dev, 0x0000021b, 0x0000c080);
2061 nv_icmd(dev, 0x0000021c, 0x0000c080);
2062 nv_icmd(dev, 0x0000021d, 0x0000c080);
2063 nv_icmd(dev, 0x0000021e, 0x0000c080);
2064 nv_icmd(dev, 0x0000021f, 0x0000c080);
2065 if (dev_priv->chipset == 0xd9) {
2066 for (i = 0x0440; i <= 0x0457; i++)
2067 nv_icmd(dev, i, 0x0000c080);
2068 }
2069 nv_icmd(dev, 0x000000ad, 0x0000013e);
2070 nv_icmd(dev, 0x000000e1, 0x00000010);
2071 nv_icmd(dev, 0x00000290, 0x00000000);
2072 nv_icmd(dev, 0x00000291, 0x00000000);
2073 nv_icmd(dev, 0x00000292, 0x00000000);
2074 nv_icmd(dev, 0x00000293, 0x00000000);
2075 nv_icmd(dev, 0x00000294, 0x00000000);
2076 nv_icmd(dev, 0x00000295, 0x00000000);
2077 nv_icmd(dev, 0x00000296, 0x00000000);
2078 nv_icmd(dev, 0x00000297, 0x00000000);
2079 nv_icmd(dev, 0x00000298, 0x00000000);
2080 nv_icmd(dev, 0x00000299, 0x00000000);
2081 nv_icmd(dev, 0x0000029a, 0x00000000);
2082 nv_icmd(dev, 0x0000029b, 0x00000000);
2083 nv_icmd(dev, 0x0000029c, 0x00000000);
2084 nv_icmd(dev, 0x0000029d, 0x00000000);
2085 nv_icmd(dev, 0x0000029e, 0x00000000);
2086 nv_icmd(dev, 0x0000029f, 0x00000000);
2087 nv_icmd(dev, 0x000003b0, 0x00000000);
2088 nv_icmd(dev, 0x000003b1, 0x00000000);
2089 nv_icmd(dev, 0x000003b2, 0x00000000);
2090 nv_icmd(dev, 0x000003b3, 0x00000000);
2091 nv_icmd(dev, 0x000003b4, 0x00000000);
2092 nv_icmd(dev, 0x000003b5, 0x00000000);
2093 nv_icmd(dev, 0x000003b6, 0x00000000);
2094 nv_icmd(dev, 0x000003b7, 0x00000000);
2095 nv_icmd(dev, 0x000003b8, 0x00000000);
2096 nv_icmd(dev, 0x000003b9, 0x00000000);
2097 nv_icmd(dev, 0x000003ba, 0x00000000);
2098 nv_icmd(dev, 0x000003bb, 0x00000000);
2099 nv_icmd(dev, 0x000003bc, 0x00000000);
2100 nv_icmd(dev, 0x000003bd, 0x00000000);
2101 nv_icmd(dev, 0x000003be, 0x00000000);
2102 nv_icmd(dev, 0x000003bf, 0x00000000);
2103 nv_icmd(dev, 0x000002a0, 0x00000000);
2104 nv_icmd(dev, 0x000002a1, 0x00000000);
2105 nv_icmd(dev, 0x000002a2, 0x00000000);
2106 nv_icmd(dev, 0x000002a3, 0x00000000);
2107 nv_icmd(dev, 0x000002a4, 0x00000000);
2108 nv_icmd(dev, 0x000002a5, 0x00000000);
2109 nv_icmd(dev, 0x000002a6, 0x00000000);
2110 nv_icmd(dev, 0x000002a7, 0x00000000);
2111 nv_icmd(dev, 0x000002a8, 0x00000000);
2112 nv_icmd(dev, 0x000002a9, 0x00000000);
2113 nv_icmd(dev, 0x000002aa, 0x00000000);
2114 nv_icmd(dev, 0x000002ab, 0x00000000);
2115 nv_icmd(dev, 0x000002ac, 0x00000000);
2116 nv_icmd(dev, 0x000002ad, 0x00000000);
2117 nv_icmd(dev, 0x000002ae, 0x00000000);
2118 nv_icmd(dev, 0x000002af, 0x00000000);
2119 nv_icmd(dev, 0x00000420, 0x00000000);
2120 nv_icmd(dev, 0x00000421, 0x00000000);
2121 nv_icmd(dev, 0x00000422, 0x00000000);
2122 nv_icmd(dev, 0x00000423, 0x00000000);
2123 nv_icmd(dev, 0x00000424, 0x00000000);
2124 nv_icmd(dev, 0x00000425, 0x00000000);
2125 nv_icmd(dev, 0x00000426, 0x00000000);
2126 nv_icmd(dev, 0x00000427, 0x00000000);
2127 nv_icmd(dev, 0x00000428, 0x00000000);
2128 nv_icmd(dev, 0x00000429, 0x00000000);
2129 nv_icmd(dev, 0x0000042a, 0x00000000);
2130 nv_icmd(dev, 0x0000042b, 0x00000000);
2131 nv_icmd(dev, 0x0000042c, 0x00000000);
2132 nv_icmd(dev, 0x0000042d, 0x00000000);
2133 nv_icmd(dev, 0x0000042e, 0x00000000);
2134 nv_icmd(dev, 0x0000042f, 0x00000000);
2135 nv_icmd(dev, 0x000002b0, 0x00000000);
2136 nv_icmd(dev, 0x000002b1, 0x00000000);
2137 nv_icmd(dev, 0x000002b2, 0x00000000);
2138 nv_icmd(dev, 0x000002b3, 0x00000000);
2139 nv_icmd(dev, 0x000002b4, 0x00000000);
2140 nv_icmd(dev, 0x000002b5, 0x00000000);
2141 nv_icmd(dev, 0x000002b6, 0x00000000);
2142 nv_icmd(dev, 0x000002b7, 0x00000000);
2143 nv_icmd(dev, 0x000002b8, 0x00000000);
2144 nv_icmd(dev, 0x000002b9, 0x00000000);
2145 nv_icmd(dev, 0x000002ba, 0x00000000);
2146 nv_icmd(dev, 0x000002bb, 0x00000000);
2147 nv_icmd(dev, 0x000002bc, 0x00000000);
2148 nv_icmd(dev, 0x000002bd, 0x00000000);
2149 nv_icmd(dev, 0x000002be, 0x00000000);
2150 nv_icmd(dev, 0x000002bf, 0x00000000);
2151 nv_icmd(dev, 0x00000430, 0x00000000);
2152 nv_icmd(dev, 0x00000431, 0x00000000);
2153 nv_icmd(dev, 0x00000432, 0x00000000);
2154 nv_icmd(dev, 0x00000433, 0x00000000);
2155 nv_icmd(dev, 0x00000434, 0x00000000);
2156 nv_icmd(dev, 0x00000435, 0x00000000);
2157 nv_icmd(dev, 0x00000436, 0x00000000);
2158 nv_icmd(dev, 0x00000437, 0x00000000);
2159 nv_icmd(dev, 0x00000438, 0x00000000);
2160 nv_icmd(dev, 0x00000439, 0x00000000);
2161 nv_icmd(dev, 0x0000043a, 0x00000000);
2162 nv_icmd(dev, 0x0000043b, 0x00000000);
2163 nv_icmd(dev, 0x0000043c, 0x00000000);
2164 nv_icmd(dev, 0x0000043d, 0x00000000);
2165 nv_icmd(dev, 0x0000043e, 0x00000000);
2166 nv_icmd(dev, 0x0000043f, 0x00000000);
2167 nv_icmd(dev, 0x000002c0, 0x00000000);
2168 nv_icmd(dev, 0x000002c1, 0x00000000);
2169 nv_icmd(dev, 0x000002c2, 0x00000000);
2170 nv_icmd(dev, 0x000002c3, 0x00000000);
2171 nv_icmd(dev, 0x000002c4, 0x00000000);
2172 nv_icmd(dev, 0x000002c5, 0x00000000);
2173 nv_icmd(dev, 0x000002c6, 0x00000000);
2174 nv_icmd(dev, 0x000002c7, 0x00000000);
2175 nv_icmd(dev, 0x000002c8, 0x00000000);
2176 nv_icmd(dev, 0x000002c9, 0x00000000);
2177 nv_icmd(dev, 0x000002ca, 0x00000000);
2178 nv_icmd(dev, 0x000002cb, 0x00000000);
2179 nv_icmd(dev, 0x000002cc, 0x00000000);
2180 nv_icmd(dev, 0x000002cd, 0x00000000);
2181 nv_icmd(dev, 0x000002ce, 0x00000000);
2182 nv_icmd(dev, 0x000002cf, 0x00000000);
2183 nv_icmd(dev, 0x000004d0, 0x00000000);
2184 nv_icmd(dev, 0x000004d1, 0x00000000);
2185 nv_icmd(dev, 0x000004d2, 0x00000000);
2186 nv_icmd(dev, 0x000004d3, 0x00000000);
2187 nv_icmd(dev, 0x000004d4, 0x00000000);
2188 nv_icmd(dev, 0x000004d5, 0x00000000);
2189 nv_icmd(dev, 0x000004d6, 0x00000000);
2190 nv_icmd(dev, 0x000004d7, 0x00000000);
2191 nv_icmd(dev, 0x000004d8, 0x00000000);
2192 nv_icmd(dev, 0x000004d9, 0x00000000);
2193 nv_icmd(dev, 0x000004da, 0x00000000);
2194 nv_icmd(dev, 0x000004db, 0x00000000);
2195 nv_icmd(dev, 0x000004dc, 0x00000000);
2196 nv_icmd(dev, 0x000004dd, 0x00000000);
2197 nv_icmd(dev, 0x000004de, 0x00000000);
2198 nv_icmd(dev, 0x000004df, 0x00000000);
2199 nv_icmd(dev, 0x00000720, 0x00000000);
2200 nv_icmd(dev, 0x00000721, 0x00000000);
2201 nv_icmd(dev, 0x00000722, 0x00000000);
2202 nv_icmd(dev, 0x00000723, 0x00000000);
2203 nv_icmd(dev, 0x00000724, 0x00000000);
2204 nv_icmd(dev, 0x00000725, 0x00000000);
2205 nv_icmd(dev, 0x00000726, 0x00000000);
2206 nv_icmd(dev, 0x00000727, 0x00000000);
2207 nv_icmd(dev, 0x00000728, 0x00000000);
2208 nv_icmd(dev, 0x00000729, 0x00000000);
2209 nv_icmd(dev, 0x0000072a, 0x00000000);
2210 nv_icmd(dev, 0x0000072b, 0x00000000);
2211 nv_icmd(dev, 0x0000072c, 0x00000000);
2212 nv_icmd(dev, 0x0000072d, 0x00000000);
2213 nv_icmd(dev, 0x0000072e, 0x00000000);
2214 nv_icmd(dev, 0x0000072f, 0x00000000);
2215 nv_icmd(dev, 0x000008c0, 0x00000000);
2216 nv_icmd(dev, 0x000008c1, 0x00000000);
2217 nv_icmd(dev, 0x000008c2, 0x00000000);
2218 nv_icmd(dev, 0x000008c3, 0x00000000);
2219 nv_icmd(dev, 0x000008c4, 0x00000000);
2220 nv_icmd(dev, 0x000008c5, 0x00000000);
2221 nv_icmd(dev, 0x000008c6, 0x00000000);
2222 nv_icmd(dev, 0x000008c7, 0x00000000);
2223 nv_icmd(dev, 0x000008c8, 0x00000000);
2224 nv_icmd(dev, 0x000008c9, 0x00000000);
2225 nv_icmd(dev, 0x000008ca, 0x00000000);
2226 nv_icmd(dev, 0x000008cb, 0x00000000);
2227 nv_icmd(dev, 0x000008cc, 0x00000000);
2228 nv_icmd(dev, 0x000008cd, 0x00000000);
2229 nv_icmd(dev, 0x000008ce, 0x00000000);
2230 nv_icmd(dev, 0x000008cf, 0x00000000);
2231 nv_icmd(dev, 0x00000890, 0x00000000);
2232 nv_icmd(dev, 0x00000891, 0x00000000);
2233 nv_icmd(dev, 0x00000892, 0x00000000);
2234 nv_icmd(dev, 0x00000893, 0x00000000);
2235 nv_icmd(dev, 0x00000894, 0x00000000);
2236 nv_icmd(dev, 0x00000895, 0x00000000);
2237 nv_icmd(dev, 0x00000896, 0x00000000);
2238 nv_icmd(dev, 0x00000897, 0x00000000);
2239 nv_icmd(dev, 0x00000898, 0x00000000);
2240 nv_icmd(dev, 0x00000899, 0x00000000);
2241 nv_icmd(dev, 0x0000089a, 0x00000000);
2242 nv_icmd(dev, 0x0000089b, 0x00000000);
2243 nv_icmd(dev, 0x0000089c, 0x00000000);
2244 nv_icmd(dev, 0x0000089d, 0x00000000);
2245 nv_icmd(dev, 0x0000089e, 0x00000000);
2246 nv_icmd(dev, 0x0000089f, 0x00000000);
2247 nv_icmd(dev, 0x000008e0, 0x00000000);
2248 nv_icmd(dev, 0x000008e1, 0x00000000);
2249 nv_icmd(dev, 0x000008e2, 0x00000000);
2250 nv_icmd(dev, 0x000008e3, 0x00000000);
2251 nv_icmd(dev, 0x000008e4, 0x00000000);
2252 nv_icmd(dev, 0x000008e5, 0x00000000);
2253 nv_icmd(dev, 0x000008e6, 0x00000000);
2254 nv_icmd(dev, 0x000008e7, 0x00000000);
2255 nv_icmd(dev, 0x000008e8, 0x00000000);
2256 nv_icmd(dev, 0x000008e9, 0x00000000);
2257 nv_icmd(dev, 0x000008ea, 0x00000000);
2258 nv_icmd(dev, 0x000008eb, 0x00000000);
2259 nv_icmd(dev, 0x000008ec, 0x00000000);
2260 nv_icmd(dev, 0x000008ed, 0x00000000);
2261 nv_icmd(dev, 0x000008ee, 0x00000000);
2262 nv_icmd(dev, 0x000008ef, 0x00000000);
2263 nv_icmd(dev, 0x000008a0, 0x00000000);
2264 nv_icmd(dev, 0x000008a1, 0x00000000);
2265 nv_icmd(dev, 0x000008a2, 0x00000000);
2266 nv_icmd(dev, 0x000008a3, 0x00000000);
2267 nv_icmd(dev, 0x000008a4, 0x00000000);
2268 nv_icmd(dev, 0x000008a5, 0x00000000);
2269 nv_icmd(dev, 0x000008a6, 0x00000000);
2270 nv_icmd(dev, 0x000008a7, 0x00000000);
2271 nv_icmd(dev, 0x000008a8, 0x00000000);
2272 nv_icmd(dev, 0x000008a9, 0x00000000);
2273 nv_icmd(dev, 0x000008aa, 0x00000000);
2274 nv_icmd(dev, 0x000008ab, 0x00000000);
2275 nv_icmd(dev, 0x000008ac, 0x00000000);
2276 nv_icmd(dev, 0x000008ad, 0x00000000);
2277 nv_icmd(dev, 0x000008ae, 0x00000000);
2278 nv_icmd(dev, 0x000008af, 0x00000000);
2279 nv_icmd(dev, 0x000008f0, 0x00000000);
2280 nv_icmd(dev, 0x000008f1, 0x00000000);
2281 nv_icmd(dev, 0x000008f2, 0x00000000);
2282 nv_icmd(dev, 0x000008f3, 0x00000000);
2283 nv_icmd(dev, 0x000008f4, 0x00000000);
2284 nv_icmd(dev, 0x000008f5, 0x00000000);
2285 nv_icmd(dev, 0x000008f6, 0x00000000);
2286 nv_icmd(dev, 0x000008f7, 0x00000000);
2287 nv_icmd(dev, 0x000008f8, 0x00000000);
2288 nv_icmd(dev, 0x000008f9, 0x00000000);
2289 nv_icmd(dev, 0x000008fa, 0x00000000);
2290 nv_icmd(dev, 0x000008fb, 0x00000000);
2291 nv_icmd(dev, 0x000008fc, 0x00000000);
2292 nv_icmd(dev, 0x000008fd, 0x00000000);
2293 nv_icmd(dev, 0x000008fe, 0x00000000);
2294 nv_icmd(dev, 0x000008ff, 0x00000000);
2295 nv_icmd(dev, 0x0000094c, 0x000000ff);
2296 nv_icmd(dev, 0x0000094d, 0xffffffff);
2297 nv_icmd(dev, 0x0000094e, 0x00000002);
2298 nv_icmd(dev, 0x000002ec, 0x00000001);
2299 nv_icmd(dev, 0x00000303, 0x00000001);
2300 nv_icmd(dev, 0x000002e6, 0x00000001);
2301 nv_icmd(dev, 0x00000466, 0x00000052);
2302 nv_icmd(dev, 0x00000301, 0x3f800000);
2303 nv_icmd(dev, 0x00000304, 0x30201000);
2304 nv_icmd(dev, 0x00000305, 0x70605040);
2305 nv_icmd(dev, 0x00000306, 0xb8a89888);
2306 nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
2307 nv_icmd(dev, 0x0000030a, 0x00ffff00);
2308 nv_icmd(dev, 0x0000030b, 0x0000001a);
2309 nv_icmd(dev, 0x0000030c, 0x00000001);
2310 nv_icmd(dev, 0x00000318, 0x00000001);
2311 nv_icmd(dev, 0x00000340, 0x00000000);
2312 nv_icmd(dev, 0x00000375, 0x00000001);
2313 nv_icmd(dev, 0x00000351, 0x00000100);
2314 nv_icmd(dev, 0x0000037d, 0x00000006);
2315 nv_icmd(dev, 0x000003a0, 0x00000002);
2316 nv_icmd(dev, 0x000003aa, 0x00000001);
2317 nv_icmd(dev, 0x000003a9, 0x00000001);
2318 nv_icmd(dev, 0x00000380, 0x00000001);
2319 nv_icmd(dev, 0x00000360, 0x00000040);
2320 nv_icmd(dev, 0x00000366, 0x00000000);
2321 nv_icmd(dev, 0x00000367, 0x00000000);
2322 nv_icmd(dev, 0x00000368, 0x00001fff);
2323 nv_icmd(dev, 0x00000370, 0x00000000);
2324 nv_icmd(dev, 0x00000371, 0x00000000);
2325 nv_icmd(dev, 0x00000372, 0x003fffff);
2326 nv_icmd(dev, 0x0000037a, 0x00000012);
2327 nv_icmd(dev, 0x000005e0, 0x00000022);
2328 nv_icmd(dev, 0x000005e1, 0x00000022);
2329 nv_icmd(dev, 0x000005e2, 0x00000022);
2330 nv_icmd(dev, 0x000005e3, 0x00000022);
2331 nv_icmd(dev, 0x000005e4, 0x00000022);
2332 nv_icmd(dev, 0x00000619, 0x00000003);
2333 nv_icmd(dev, 0x00000811, 0x00000003);
2334 nv_icmd(dev, 0x00000812, 0x00000004);
2335 nv_icmd(dev, 0x00000813, 0x00000006);
2336 nv_icmd(dev, 0x00000814, 0x00000008);
2337 nv_icmd(dev, 0x00000815, 0x0000000b);
2338 nv_icmd(dev, 0x00000800, 0x00000001);
2339 nv_icmd(dev, 0x00000801, 0x00000001);
2340 nv_icmd(dev, 0x00000802, 0x00000001);
2341 nv_icmd(dev, 0x00000803, 0x00000001);
2342 nv_icmd(dev, 0x00000804, 0x00000001);
2343 nv_icmd(dev, 0x00000805, 0x00000001);
2344 nv_icmd(dev, 0x00000632, 0x00000001);
2345 nv_icmd(dev, 0x00000633, 0x00000002);
2346 nv_icmd(dev, 0x00000634, 0x00000003);
2347 nv_icmd(dev, 0x00000635, 0x00000004);
2348 nv_icmd(dev, 0x00000654, 0x3f800000);
2349 nv_icmd(dev, 0x00000657, 0x3f800000);
2350 nv_icmd(dev, 0x00000655, 0x3f800000);
2351 nv_icmd(dev, 0x00000656, 0x3f800000);
2352 nv_icmd(dev, 0x000006cd, 0x3f800000);
2353 nv_icmd(dev, 0x000007f5, 0x3f800000);
2354 nv_icmd(dev, 0x000007dc, 0x39291909);
2355 nv_icmd(dev, 0x000007dd, 0x79695949);
2356 nv_icmd(dev, 0x000007de, 0xb9a99989);
2357 nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
2358 nv_icmd(dev, 0x000007e8, 0x00003210);
2359 nv_icmd(dev, 0x000007e9, 0x00007654);
2360 nv_icmd(dev, 0x000007ea, 0x00000098);
2361 nv_icmd(dev, 0x000007ec, 0x39291909);
2362 nv_icmd(dev, 0x000007ed, 0x79695949);
2363 nv_icmd(dev, 0x000007ee, 0xb9a99989);
2364 nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
2365 nv_icmd(dev, 0x000007f0, 0x00003210);
2366 nv_icmd(dev, 0x000007f1, 0x00007654);
2367 nv_icmd(dev, 0x000007f2, 0x00000098);
2368 nv_icmd(dev, 0x000005a5, 0x00000001);
2369 nv_icmd(dev, 0x00000980, 0x00000000);
2370 nv_icmd(dev, 0x00000981, 0x00000000);
2371 nv_icmd(dev, 0x00000982, 0x00000000);
2372 nv_icmd(dev, 0x00000983, 0x00000000);
2373 nv_icmd(dev, 0x00000984, 0x00000000);
2374 nv_icmd(dev, 0x00000985, 0x00000000);
2375 nv_icmd(dev, 0x00000986, 0x00000000);
2376 nv_icmd(dev, 0x00000987, 0x00000000);
2377 nv_icmd(dev, 0x00000988, 0x00000000);
2378 nv_icmd(dev, 0x00000989, 0x00000000);
2379 nv_icmd(dev, 0x0000098a, 0x00000000);
2380 nv_icmd(dev, 0x0000098b, 0x00000000);
2381 nv_icmd(dev, 0x0000098c, 0x00000000);
2382 nv_icmd(dev, 0x0000098d, 0x00000000);
2383 nv_icmd(dev, 0x0000098e, 0x00000000);
2384 nv_icmd(dev, 0x0000098f, 0x00000000);
2385 nv_icmd(dev, 0x00000990, 0x00000000);
2386 nv_icmd(dev, 0x00000991, 0x00000000);
2387 nv_icmd(dev, 0x00000992, 0x00000000);
2388 nv_icmd(dev, 0x00000993, 0x00000000);
2389 nv_icmd(dev, 0x00000994, 0x00000000);
2390 nv_icmd(dev, 0x00000995, 0x00000000);
2391 nv_icmd(dev, 0x00000996, 0x00000000);
2392 nv_icmd(dev, 0x00000997, 0x00000000);
2393 nv_icmd(dev, 0x00000998, 0x00000000);
2394 nv_icmd(dev, 0x00000999, 0x00000000);
2395 nv_icmd(dev, 0x0000099a, 0x00000000);
2396 nv_icmd(dev, 0x0000099b, 0x00000000);
2397 nv_icmd(dev, 0x0000099c, 0x00000000);
2398 nv_icmd(dev, 0x0000099d, 0x00000000);
2399 nv_icmd(dev, 0x0000099e, 0x00000000);
2400 nv_icmd(dev, 0x0000099f, 0x00000000);
2401 nv_icmd(dev, 0x000009a0, 0x00000000);
2402 nv_icmd(dev, 0x000009a1, 0x00000000);
2403 nv_icmd(dev, 0x000009a2, 0x00000000);
2404 nv_icmd(dev, 0x000009a3, 0x00000000);
2405 nv_icmd(dev, 0x000009a4, 0x00000000);
2406 nv_icmd(dev, 0x000009a5, 0x00000000);
2407 nv_icmd(dev, 0x000009a6, 0x00000000);
2408 nv_icmd(dev, 0x000009a7, 0x00000000);
2409 nv_icmd(dev, 0x000009a8, 0x00000000);
2410 nv_icmd(dev, 0x000009a9, 0x00000000);
2411 nv_icmd(dev, 0x000009aa, 0x00000000);
2412 nv_icmd(dev, 0x000009ab, 0x00000000);
2413 nv_icmd(dev, 0x000009ac, 0x00000000);
2414 nv_icmd(dev, 0x000009ad, 0x00000000);
2415 nv_icmd(dev, 0x000009ae, 0x00000000);
2416 nv_icmd(dev, 0x000009af, 0x00000000);
2417 nv_icmd(dev, 0x000009b0, 0x00000000);
2418 nv_icmd(dev, 0x000009b1, 0x00000000);
2419 nv_icmd(dev, 0x000009b2, 0x00000000);
2420 nv_icmd(dev, 0x000009b3, 0x00000000);
2421 nv_icmd(dev, 0x000009b4, 0x00000000);
2422 nv_icmd(dev, 0x000009b5, 0x00000000);
2423 nv_icmd(dev, 0x000009b6, 0x00000000);
2424 nv_icmd(dev, 0x000009b7, 0x00000000);
2425 nv_icmd(dev, 0x000009b8, 0x00000000);
2426 nv_icmd(dev, 0x000009b9, 0x00000000);
2427 nv_icmd(dev, 0x000009ba, 0x00000000);
2428 nv_icmd(dev, 0x000009bb, 0x00000000);
2429 nv_icmd(dev, 0x000009bc, 0x00000000);
2430 nv_icmd(dev, 0x000009bd, 0x00000000);
2431 nv_icmd(dev, 0x000009be, 0x00000000);
2432 nv_icmd(dev, 0x000009bf, 0x00000000);
2433 nv_icmd(dev, 0x000009c0, 0x00000000);
2434 nv_icmd(dev, 0x000009c1, 0x00000000);
2435 nv_icmd(dev, 0x000009c2, 0x00000000);
2436 nv_icmd(dev, 0x000009c3, 0x00000000);
2437 nv_icmd(dev, 0x000009c4, 0x00000000);
2438 nv_icmd(dev, 0x000009c5, 0x00000000);
2439 nv_icmd(dev, 0x000009c6, 0x00000000);
2440 nv_icmd(dev, 0x000009c7, 0x00000000);
2441 nv_icmd(dev, 0x000009c8, 0x00000000);
2442 nv_icmd(dev, 0x000009c9, 0x00000000);
2443 nv_icmd(dev, 0x000009ca, 0x00000000);
2444 nv_icmd(dev, 0x000009cb, 0x00000000);
2445 nv_icmd(dev, 0x000009cc, 0x00000000);
2446 nv_icmd(dev, 0x000009cd, 0x00000000);
2447 nv_icmd(dev, 0x000009ce, 0x00000000);
2448 nv_icmd(dev, 0x000009cf, 0x00000000);
2449 nv_icmd(dev, 0x000009d0, 0x00000000);
2450 nv_icmd(dev, 0x000009d1, 0x00000000);
2451 nv_icmd(dev, 0x000009d2, 0x00000000);
2452 nv_icmd(dev, 0x000009d3, 0x00000000);
2453 nv_icmd(dev, 0x000009d4, 0x00000000);
2454 nv_icmd(dev, 0x000009d5, 0x00000000);
2455 nv_icmd(dev, 0x000009d6, 0x00000000);
2456 nv_icmd(dev, 0x000009d7, 0x00000000);
2457 nv_icmd(dev, 0x000009d8, 0x00000000);
2458 nv_icmd(dev, 0x000009d9, 0x00000000);
2459 nv_icmd(dev, 0x000009da, 0x00000000);
2460 nv_icmd(dev, 0x000009db, 0x00000000);
2461 nv_icmd(dev, 0x000009dc, 0x00000000);
2462 nv_icmd(dev, 0x000009dd, 0x00000000);
2463 nv_icmd(dev, 0x000009de, 0x00000000);
2464 nv_icmd(dev, 0x000009df, 0x00000000);
2465 nv_icmd(dev, 0x000009e0, 0x00000000);
2466 nv_icmd(dev, 0x000009e1, 0x00000000);
2467 nv_icmd(dev, 0x000009e2, 0x00000000);
2468 nv_icmd(dev, 0x000009e3, 0x00000000);
2469 nv_icmd(dev, 0x000009e4, 0x00000000);
2470 nv_icmd(dev, 0x000009e5, 0x00000000);
2471 nv_icmd(dev, 0x000009e6, 0x00000000);
2472 nv_icmd(dev, 0x000009e7, 0x00000000);
2473 nv_icmd(dev, 0x000009e8, 0x00000000);
2474 nv_icmd(dev, 0x000009e9, 0x00000000);
2475 nv_icmd(dev, 0x000009ea, 0x00000000);
2476 nv_icmd(dev, 0x000009eb, 0x00000000);
2477 nv_icmd(dev, 0x000009ec, 0x00000000);
2478 nv_icmd(dev, 0x000009ed, 0x00000000);
2479 nv_icmd(dev, 0x000009ee, 0x00000000);
2480 nv_icmd(dev, 0x000009ef, 0x00000000);
2481 nv_icmd(dev, 0x000009f0, 0x00000000);
2482 nv_icmd(dev, 0x000009f1, 0x00000000);
2483 nv_icmd(dev, 0x000009f2, 0x00000000);
2484 nv_icmd(dev, 0x000009f3, 0x00000000);
2485 nv_icmd(dev, 0x000009f4, 0x00000000);
2486 nv_icmd(dev, 0x000009f5, 0x00000000);
2487 nv_icmd(dev, 0x000009f6, 0x00000000);
2488 nv_icmd(dev, 0x000009f7, 0x00000000);
2489 nv_icmd(dev, 0x000009f8, 0x00000000);
2490 nv_icmd(dev, 0x000009f9, 0x00000000);
2491 nv_icmd(dev, 0x000009fa, 0x00000000);
2492 nv_icmd(dev, 0x000009fb, 0x00000000);
2493 nv_icmd(dev, 0x000009fc, 0x00000000);
2494 nv_icmd(dev, 0x000009fd, 0x00000000);
2495 nv_icmd(dev, 0x000009fe, 0x00000000);
2496 nv_icmd(dev, 0x000009ff, 0x00000000);
2497 nv_icmd(dev, 0x00000468, 0x00000004);
2498 nv_icmd(dev, 0x0000046c, 0x00000001);
2499 nv_icmd(dev, 0x00000470, 0x00000000);
2500 nv_icmd(dev, 0x00000471, 0x00000000);
2501 nv_icmd(dev, 0x00000472, 0x00000000);
2502 nv_icmd(dev, 0x00000473, 0x00000000);
2503 nv_icmd(dev, 0x00000474, 0x00000000);
2504 nv_icmd(dev, 0x00000475, 0x00000000);
2505 nv_icmd(dev, 0x00000476, 0x00000000);
2506 nv_icmd(dev, 0x00000477, 0x00000000);
2507 nv_icmd(dev, 0x00000478, 0x00000000);
2508 nv_icmd(dev, 0x00000479, 0x00000000);
2509 nv_icmd(dev, 0x0000047a, 0x00000000);
2510 nv_icmd(dev, 0x0000047b, 0x00000000);
2511 nv_icmd(dev, 0x0000047c, 0x00000000);
2512 nv_icmd(dev, 0x0000047d, 0x00000000);
2513 nv_icmd(dev, 0x0000047e, 0x00000000);
2514 nv_icmd(dev, 0x0000047f, 0x00000000);
2515 nv_icmd(dev, 0x00000480, 0x00000000);
2516 nv_icmd(dev, 0x00000481, 0x00000000);
2517 nv_icmd(dev, 0x00000482, 0x00000000);
2518 nv_icmd(dev, 0x00000483, 0x00000000);
2519 nv_icmd(dev, 0x00000484, 0x00000000);
2520 nv_icmd(dev, 0x00000485, 0x00000000);
2521 nv_icmd(dev, 0x00000486, 0x00000000);
2522 nv_icmd(dev, 0x00000487, 0x00000000);
2523 nv_icmd(dev, 0x00000488, 0x00000000);
2524 nv_icmd(dev, 0x00000489, 0x00000000);
2525 nv_icmd(dev, 0x0000048a, 0x00000000);
2526 nv_icmd(dev, 0x0000048b, 0x00000000);
2527 nv_icmd(dev, 0x0000048c, 0x00000000);
2528 nv_icmd(dev, 0x0000048d, 0x00000000);
2529 nv_icmd(dev, 0x0000048e, 0x00000000);
2530 nv_icmd(dev, 0x0000048f, 0x00000000);
2531 nv_icmd(dev, 0x00000490, 0x00000000);
2532 nv_icmd(dev, 0x00000491, 0x00000000);
2533 nv_icmd(dev, 0x00000492, 0x00000000);
2534 nv_icmd(dev, 0x00000493, 0x00000000);
2535 nv_icmd(dev, 0x00000494, 0x00000000);
2536 nv_icmd(dev, 0x00000495, 0x00000000);
2537 nv_icmd(dev, 0x00000496, 0x00000000);
2538 nv_icmd(dev, 0x00000497, 0x00000000);
2539 nv_icmd(dev, 0x00000498, 0x00000000);
2540 nv_icmd(dev, 0x00000499, 0x00000000);
2541 nv_icmd(dev, 0x0000049a, 0x00000000);
2542 nv_icmd(dev, 0x0000049b, 0x00000000);
2543 nv_icmd(dev, 0x0000049c, 0x00000000);
2544 nv_icmd(dev, 0x0000049d, 0x00000000);
2545 nv_icmd(dev, 0x0000049e, 0x00000000);
2546 nv_icmd(dev, 0x0000049f, 0x00000000);
2547 nv_icmd(dev, 0x000004a0, 0x00000000);
2548 nv_icmd(dev, 0x000004a1, 0x00000000);
2549 nv_icmd(dev, 0x000004a2, 0x00000000);
2550 nv_icmd(dev, 0x000004a3, 0x00000000);
2551 nv_icmd(dev, 0x000004a4, 0x00000000);
2552 nv_icmd(dev, 0x000004a5, 0x00000000);
2553 nv_icmd(dev, 0x000004a6, 0x00000000);
2554 nv_icmd(dev, 0x000004a7, 0x00000000);
2555 nv_icmd(dev, 0x000004a8, 0x00000000);
2556 nv_icmd(dev, 0x000004a9, 0x00000000);
2557 nv_icmd(dev, 0x000004aa, 0x00000000);
2558 nv_icmd(dev, 0x000004ab, 0x00000000);
2559 nv_icmd(dev, 0x000004ac, 0x00000000);
2560 nv_icmd(dev, 0x000004ad, 0x00000000);
2561 nv_icmd(dev, 0x000004ae, 0x00000000);
2562 nv_icmd(dev, 0x000004af, 0x00000000);
2563 nv_icmd(dev, 0x000004b0, 0x00000000);
2564 nv_icmd(dev, 0x000004b1, 0x00000000);
2565 nv_icmd(dev, 0x000004b2, 0x00000000);
2566 nv_icmd(dev, 0x000004b3, 0x00000000);
2567 nv_icmd(dev, 0x000004b4, 0x00000000);
2568 nv_icmd(dev, 0x000004b5, 0x00000000);
2569 nv_icmd(dev, 0x000004b6, 0x00000000);
2570 nv_icmd(dev, 0x000004b7, 0x00000000);
2571 nv_icmd(dev, 0x000004b8, 0x00000000);
2572 nv_icmd(dev, 0x000004b9, 0x00000000);
2573 nv_icmd(dev, 0x000004ba, 0x00000000);
2574 nv_icmd(dev, 0x000004bb, 0x00000000);
2575 nv_icmd(dev, 0x000004bc, 0x00000000);
2576 nv_icmd(dev, 0x000004bd, 0x00000000);
2577 nv_icmd(dev, 0x000004be, 0x00000000);
2578 nv_icmd(dev, 0x000004bf, 0x00000000);
2579 nv_icmd(dev, 0x000004c0, 0x00000000);
2580 nv_icmd(dev, 0x000004c1, 0x00000000);
2581 nv_icmd(dev, 0x000004c2, 0x00000000);
2582 nv_icmd(dev, 0x000004c3, 0x00000000);
2583 nv_icmd(dev, 0x000004c4, 0x00000000);
2584 nv_icmd(dev, 0x000004c5, 0x00000000);
2585 nv_icmd(dev, 0x000004c6, 0x00000000);
2586 nv_icmd(dev, 0x000004c7, 0x00000000);
2587 nv_icmd(dev, 0x000004c8, 0x00000000);
2588 nv_icmd(dev, 0x000004c9, 0x00000000);
2589 nv_icmd(dev, 0x000004ca, 0x00000000);
2590 nv_icmd(dev, 0x000004cb, 0x00000000);
2591 nv_icmd(dev, 0x000004cc, 0x00000000);
2592 nv_icmd(dev, 0x000004cd, 0x00000000);
2593 nv_icmd(dev, 0x000004ce, 0x00000000);
2594 nv_icmd(dev, 0x000004cf, 0x00000000);
2595 nv_icmd(dev, 0x00000510, 0x3f800000);
2596 nv_icmd(dev, 0x00000511, 0x3f800000);
2597 nv_icmd(dev, 0x00000512, 0x3f800000);
2598 nv_icmd(dev, 0x00000513, 0x3f800000);
2599 nv_icmd(dev, 0x00000514, 0x3f800000);
2600 nv_icmd(dev, 0x00000515, 0x3f800000);
2601 nv_icmd(dev, 0x00000516, 0x3f800000);
2602 nv_icmd(dev, 0x00000517, 0x3f800000);
2603 nv_icmd(dev, 0x00000518, 0x3f800000);
2604 nv_icmd(dev, 0x00000519, 0x3f800000);
2605 nv_icmd(dev, 0x0000051a, 0x3f800000);
2606 nv_icmd(dev, 0x0000051b, 0x3f800000);
2607 nv_icmd(dev, 0x0000051c, 0x3f800000);
2608 nv_icmd(dev, 0x0000051d, 0x3f800000);
2609 nv_icmd(dev, 0x0000051e, 0x3f800000);
2610 nv_icmd(dev, 0x0000051f, 0x3f800000);
2611 nv_icmd(dev, 0x00000520, 0x000002b6);
2612 nv_icmd(dev, 0x00000529, 0x00000001);
2613 nv_icmd(dev, 0x00000530, 0xffff0000);
2614 nv_icmd(dev, 0x00000531, 0xffff0000);
2615 nv_icmd(dev, 0x00000532, 0xffff0000);
2616 nv_icmd(dev, 0x00000533, 0xffff0000);
2617 nv_icmd(dev, 0x00000534, 0xffff0000);
2618 nv_icmd(dev, 0x00000535, 0xffff0000);
2619 nv_icmd(dev, 0x00000536, 0xffff0000);
2620 nv_icmd(dev, 0x00000537, 0xffff0000);
2621 nv_icmd(dev, 0x00000538, 0xffff0000);
2622 nv_icmd(dev, 0x00000539, 0xffff0000);
2623 nv_icmd(dev, 0x0000053a, 0xffff0000);
2624 nv_icmd(dev, 0x0000053b, 0xffff0000);
2625 nv_icmd(dev, 0x0000053c, 0xffff0000);
2626 nv_icmd(dev, 0x0000053d, 0xffff0000);
2627 nv_icmd(dev, 0x0000053e, 0xffff0000);
2628 nv_icmd(dev, 0x0000053f, 0xffff0000);
2629 nv_icmd(dev, 0x00000585, 0x0000003f);
2630 nv_icmd(dev, 0x00000576, 0x00000003);
2631 if (dev_priv->chipset == 0xc1 ||
2632 dev_priv->chipset == 0xd9)
2633 nv_icmd(dev, 0x0000057b, 0x00000059);
2634 nv_icmd(dev, 0x00000586, 0x00000040);
2635 nv_icmd(dev, 0x00000582, 0x00000080);
2636 nv_icmd(dev, 0x00000583, 0x00000080);
2637 nv_icmd(dev, 0x000005c2, 0x00000001);
2638 nv_icmd(dev, 0x00000638, 0x00000001);
2639 nv_icmd(dev, 0x00000639, 0x00000001);
2640 nv_icmd(dev, 0x0000063a, 0x00000002);
2641 nv_icmd(dev, 0x0000063b, 0x00000001);
2642 nv_icmd(dev, 0x0000063c, 0x00000001);
2643 nv_icmd(dev, 0x0000063d, 0x00000002);
2644 nv_icmd(dev, 0x0000063e, 0x00000001);
2645 nv_icmd(dev, 0x000008b8, 0x00000001);
2646 nv_icmd(dev, 0x000008b9, 0x00000001);
2647 nv_icmd(dev, 0x000008ba, 0x00000001);
2648 nv_icmd(dev, 0x000008bb, 0x00000001);
2649 nv_icmd(dev, 0x000008bc, 0x00000001);
2650 nv_icmd(dev, 0x000008bd, 0x00000001);
2651 nv_icmd(dev, 0x000008be, 0x00000001);
2652 nv_icmd(dev, 0x000008bf, 0x00000001);
2653 nv_icmd(dev, 0x00000900, 0x00000001);
2654 nv_icmd(dev, 0x00000901, 0x00000001);
2655 nv_icmd(dev, 0x00000902, 0x00000001);
2656 nv_icmd(dev, 0x00000903, 0x00000001);
2657 nv_icmd(dev, 0x00000904, 0x00000001);
2658 nv_icmd(dev, 0x00000905, 0x00000001);
2659 nv_icmd(dev, 0x00000906, 0x00000001);
2660 nv_icmd(dev, 0x00000907, 0x00000001);
2661 nv_icmd(dev, 0x00000908, 0x00000002);
2662 nv_icmd(dev, 0x00000909, 0x00000002);
2663 nv_icmd(dev, 0x0000090a, 0x00000002);
2664 nv_icmd(dev, 0x0000090b, 0x00000002);
2665 nv_icmd(dev, 0x0000090c, 0x00000002);
2666 nv_icmd(dev, 0x0000090d, 0x00000002);
2667 nv_icmd(dev, 0x0000090e, 0x00000002);
2668 nv_icmd(dev, 0x0000090f, 0x00000002);
2669 nv_icmd(dev, 0x00000910, 0x00000001);
2670 nv_icmd(dev, 0x00000911, 0x00000001);
2671 nv_icmd(dev, 0x00000912, 0x00000001);
2672 nv_icmd(dev, 0x00000913, 0x00000001);
2673 nv_icmd(dev, 0x00000914, 0x00000001);
2674 nv_icmd(dev, 0x00000915, 0x00000001);
2675 nv_icmd(dev, 0x00000916, 0x00000001);
2676 nv_icmd(dev, 0x00000917, 0x00000001);
2677 nv_icmd(dev, 0x00000918, 0x00000001);
2678 nv_icmd(dev, 0x00000919, 0x00000001);
2679 nv_icmd(dev, 0x0000091a, 0x00000001);
2680 nv_icmd(dev, 0x0000091b, 0x00000001);
2681 nv_icmd(dev, 0x0000091c, 0x00000001);
2682 nv_icmd(dev, 0x0000091d, 0x00000001);
2683 nv_icmd(dev, 0x0000091e, 0x00000001);
2684 nv_icmd(dev, 0x0000091f, 0x00000001);
2685 nv_icmd(dev, 0x00000920, 0x00000002);
2686 nv_icmd(dev, 0x00000921, 0x00000002);
2687 nv_icmd(dev, 0x00000922, 0x00000002);
2688 nv_icmd(dev, 0x00000923, 0x00000002);
2689 nv_icmd(dev, 0x00000924, 0x00000002);
2690 nv_icmd(dev, 0x00000925, 0x00000002);
2691 nv_icmd(dev, 0x00000926, 0x00000002);
2692 nv_icmd(dev, 0x00000927, 0x00000002);
2693 nv_icmd(dev, 0x00000928, 0x00000001);
2694 nv_icmd(dev, 0x00000929, 0x00000001);
2695 nv_icmd(dev, 0x0000092a, 0x00000001);
2696 nv_icmd(dev, 0x0000092b, 0x00000001);
2697 nv_icmd(dev, 0x0000092c, 0x00000001);
2698 nv_icmd(dev, 0x0000092d, 0x00000001);
2699 nv_icmd(dev, 0x0000092e, 0x00000001);
2700 nv_icmd(dev, 0x0000092f, 0x00000001);
2701 nv_icmd(dev, 0x00000648, 0x00000001);
2702 nv_icmd(dev, 0x00000649, 0x00000001);
2703 nv_icmd(dev, 0x0000064a, 0x00000001);
2704 nv_icmd(dev, 0x0000064b, 0x00000001);
2705 nv_icmd(dev, 0x0000064c, 0x00000001);
2706 nv_icmd(dev, 0x0000064d, 0x00000001);
2707 nv_icmd(dev, 0x0000064e, 0x00000001);
2708 nv_icmd(dev, 0x0000064f, 0x00000001);
2709 nv_icmd(dev, 0x00000650, 0x00000001);
2710 nv_icmd(dev, 0x00000658, 0x0000000f);
2711 nv_icmd(dev, 0x000007ff, 0x0000000a);
2712 nv_icmd(dev, 0x0000066a, 0x40000000);
2713 nv_icmd(dev, 0x0000066b, 0x10000000);
2714 nv_icmd(dev, 0x0000066c, 0xffff0000);
2715 nv_icmd(dev, 0x0000066d, 0xffff0000);
2716 nv_icmd(dev, 0x000007af, 0x00000008);
2717 nv_icmd(dev, 0x000007b0, 0x00000008);
2718 nv_icmd(dev, 0x000007f6, 0x00000001);
2719 nv_icmd(dev, 0x000006b2, 0x00000055);
2720 nv_icmd(dev, 0x000007ad, 0x00000003);
2721 nv_icmd(dev, 0x00000937, 0x00000001);
2722 nv_icmd(dev, 0x00000971, 0x00000008);
2723 nv_icmd(dev, 0x00000972, 0x00000040);
2724 nv_icmd(dev, 0x00000973, 0x0000012c);
2725 nv_icmd(dev, 0x0000097c, 0x00000040);
2726 nv_icmd(dev, 0x00000979, 0x00000003);
2727 nv_icmd(dev, 0x00000975, 0x00000020);
2728 nv_icmd(dev, 0x00000976, 0x00000001);
2729 nv_icmd(dev, 0x00000977, 0x00000020);
2730 nv_icmd(dev, 0x00000978, 0x00000001);
2731 nv_icmd(dev, 0x00000957, 0x00000003);
2732 nv_icmd(dev, 0x0000095e, 0x20164010);
2733 nv_icmd(dev, 0x0000095f, 0x00000020);
2734 if (dev_priv->chipset == 0xd9)
2735 nv_icmd(dev, 0x0000097d, 0x00000020);
2736 nv_icmd(dev, 0x00000683, 0x00000006);
2737 nv_icmd(dev, 0x00000685, 0x003fffff);
2738 nv_icmd(dev, 0x00000687, 0x00000c48);
2739 nv_icmd(dev, 0x000006a0, 0x00000005);
2740 nv_icmd(dev, 0x00000840, 0x00300008);
2741 nv_icmd(dev, 0x00000841, 0x04000080);
2742 nv_icmd(dev, 0x00000842, 0x00300008);
2743 nv_icmd(dev, 0x00000843, 0x04000080);
2744 nv_icmd(dev, 0x00000818, 0x00000000);
2745 nv_icmd(dev, 0x00000819, 0x00000000);
2746 nv_icmd(dev, 0x0000081a, 0x00000000);
2747 nv_icmd(dev, 0x0000081b, 0x00000000);
2748 nv_icmd(dev, 0x0000081c, 0x00000000);
2749 nv_icmd(dev, 0x0000081d, 0x00000000);
2750 nv_icmd(dev, 0x0000081e, 0x00000000);
2751 nv_icmd(dev, 0x0000081f, 0x00000000);
2752 nv_icmd(dev, 0x00000848, 0x00000000);
2753 nv_icmd(dev, 0x00000849, 0x00000000);
2754 nv_icmd(dev, 0x0000084a, 0x00000000);
2755 nv_icmd(dev, 0x0000084b, 0x00000000);
2756 nv_icmd(dev, 0x0000084c, 0x00000000);
2757 nv_icmd(dev, 0x0000084d, 0x00000000);
2758 nv_icmd(dev, 0x0000084e, 0x00000000);
2759 nv_icmd(dev, 0x0000084f, 0x00000000);
2760 nv_icmd(dev, 0x00000850, 0x00000000);
2761 nv_icmd(dev, 0x00000851, 0x00000000);
2762 nv_icmd(dev, 0x00000852, 0x00000000);
2763 nv_icmd(dev, 0x00000853, 0x00000000);
2764 nv_icmd(dev, 0x00000854, 0x00000000);
2765 nv_icmd(dev, 0x00000855, 0x00000000);
2766 nv_icmd(dev, 0x00000856, 0x00000000);
2767 nv_icmd(dev, 0x00000857, 0x00000000);
2768 nv_icmd(dev, 0x00000738, 0x00000000);
2769 nv_icmd(dev, 0x000006aa, 0x00000001);
2770 nv_icmd(dev, 0x000006ab, 0x00000002);
2771 nv_icmd(dev, 0x000006ac, 0x00000080);
2772 nv_icmd(dev, 0x000006ad, 0x00000100);
2773 nv_icmd(dev, 0x000006ae, 0x00000100);
2774 nv_icmd(dev, 0x000006b1, 0x00000011);
2775 nv_icmd(dev, 0x000006bb, 0x000000cf);
2776 nv_icmd(dev, 0x000006ce, 0x2a712488);
2777 nv_icmd(dev, 0x00000739, 0x4085c000);
2778 nv_icmd(dev, 0x0000073a, 0x00000080);
2779 nv_icmd(dev, 0x00000786, 0x80000100);
2780 nv_icmd(dev, 0x0000073c, 0x00010100);
2781 nv_icmd(dev, 0x0000073d, 0x02800000);
2782 nv_icmd(dev, 0x00000787, 0x000000cf);
2783 nv_icmd(dev, 0x0000078c, 0x00000008);
2784 nv_icmd(dev, 0x00000792, 0x00000001);
2785 nv_icmd(dev, 0x00000794, 0x00000001);
2786 nv_icmd(dev, 0x00000795, 0x00000001);
2787 nv_icmd(dev, 0x00000796, 0x00000001);
2788 nv_icmd(dev, 0x00000797, 0x000000cf);
2789 nv_icmd(dev, 0x00000836, 0x00000001);
2790 nv_icmd(dev, 0x0000079a, 0x00000002);
2791 nv_icmd(dev, 0x00000833, 0x04444480);
2792 nv_icmd(dev, 0x000007a1, 0x00000001);
2793 nv_icmd(dev, 0x000007a3, 0x00000001);
2794 nv_icmd(dev, 0x000007a4, 0x00000001);
2795 nv_icmd(dev, 0x000007a5, 0x00000001);
2796 nv_icmd(dev, 0x00000831, 0x00000004);
2797 nv_icmd(dev, 0x0000080c, 0x00000002);
2798 nv_icmd(dev, 0x0000080d, 0x00000100);
2799 nv_icmd(dev, 0x0000080e, 0x00000100);
2800 nv_icmd(dev, 0x0000080f, 0x00000001);
2801 nv_icmd(dev, 0x00000823, 0x00000002);
2802 nv_icmd(dev, 0x00000824, 0x00000100);
2803 nv_icmd(dev, 0x00000825, 0x00000100);
2804 nv_icmd(dev, 0x00000826, 0x00000001);
2805 nv_icmd(dev, 0x0000095d, 0x00000001);
2806 nv_icmd(dev, 0x0000082b, 0x00000004);
2807 nv_icmd(dev, 0x00000942, 0x00010001);
2808 nv_icmd(dev, 0x00000943, 0x00000001);
2809 nv_icmd(dev, 0x00000944, 0x00000022);
2810 nv_icmd(dev, 0x000007c5, 0x00010001);
2811 nv_icmd(dev, 0x00000834, 0x00000001);
2812 nv_icmd(dev, 0x000007c7, 0x00000001);
2813 nv_icmd(dev, 0x0000c1b0, 0x0000000f);
2814 nv_icmd(dev, 0x0000c1b1, 0x0000000f);
2815 nv_icmd(dev, 0x0000c1b2, 0x0000000f);
2816 nv_icmd(dev, 0x0000c1b3, 0x0000000f);
2817 nv_icmd(dev, 0x0000c1b4, 0x0000000f);
2818 nv_icmd(dev, 0x0000c1b5, 0x0000000f);
2819 nv_icmd(dev, 0x0000c1b6, 0x0000000f);
2820 nv_icmd(dev, 0x0000c1b7, 0x0000000f);
2821 nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
2822 nv_icmd(dev, 0x0000c1b9, 0x00fac688);
2823 nv_icmd(dev, 0x0001e100, 0x00000001);
2824 nv_icmd(dev, 0x00001000, 0x00000002);
2825 nv_icmd(dev, 0x000006aa, 0x00000001);
2826 nv_icmd(dev, 0x000006ad, 0x00000100);
2827 nv_icmd(dev, 0x000006ae, 0x00000100);
2828 nv_icmd(dev, 0x000006b1, 0x00000011);
2829 nv_icmd(dev, 0x0000078c, 0x00000008);
2830 nv_icmd(dev, 0x00000792, 0x00000001);
2831 nv_icmd(dev, 0x00000794, 0x00000001);
2832 nv_icmd(dev, 0x00000795, 0x00000001);
2833 nv_icmd(dev, 0x00000796, 0x00000001);
2834 nv_icmd(dev, 0x00000797, 0x000000cf);
2835 nv_icmd(dev, 0x0000079a, 0x00000002);
2836 nv_icmd(dev, 0x00000833, 0x04444480);
2837 nv_icmd(dev, 0x000007a1, 0x00000001);
2838 nv_icmd(dev, 0x000007a3, 0x00000001);
2839 nv_icmd(dev, 0x000007a4, 0x00000001);
2840 nv_icmd(dev, 0x000007a5, 0x00000001);
2841 nv_icmd(dev, 0x00000831, 0x00000004);
2842 nv_icmd(dev, 0x0001e100, 0x00000001);
2843 nv_icmd(dev, 0x00001000, 0x00000014);
2844 nv_icmd(dev, 0x00000351, 0x00000100);
2845 nv_icmd(dev, 0x00000957, 0x00000003);
2846 nv_icmd(dev, 0x0000095d, 0x00000001);
2847 nv_icmd(dev, 0x0000082b, 0x00000004);
2848 nv_icmd(dev, 0x00000942, 0x00010001);
2849 nv_icmd(dev, 0x00000943, 0x00000001);
2850 nv_icmd(dev, 0x000007c5, 0x00010001);
2851 nv_icmd(dev, 0x00000834, 0x00000001);
2852 nv_icmd(dev, 0x000007c7, 0x00000001);
2853 nv_icmd(dev, 0x0001e100, 0x00000001);
2854 nv_icmd(dev, 0x00001000, 0x00000001);
2855 nv_icmd(dev, 0x0000080c, 0x00000002);
2856 nv_icmd(dev, 0x0000080d, 0x00000100);
2857 nv_icmd(dev, 0x0000080e, 0x00000100);
2858 nv_icmd(dev, 0x0000080f, 0x00000001);
2859 nv_icmd(dev, 0x00000823, 0x00000002);
2860 nv_icmd(dev, 0x00000824, 0x00000100);
2861 nv_icmd(dev, 0x00000825, 0x00000100);
2862 nv_icmd(dev, 0x00000826, 0x00000001);
2863 nv_icmd(dev, 0x0001e100, 0x00000001);
2864 nv_wr32(dev, 0x400208, 0x00000000);
2865 nv_wr32(dev, 0x404154, 0x00000400);
2866
2867 nvc0_grctx_generate_9097(dev);
2868 if (fermi >= 0x9197)
2869 nvc0_grctx_generate_9197(dev);
2870 if (fermi >= 0x9297)
2871 nvc0_grctx_generate_9297(dev);
2872 nvc0_grctx_generate_902d(dev);
2873 nvc0_grctx_generate_9039(dev);
2874 nvc0_grctx_generate_90c0(dev);
2875
2876 nv_wr32(dev, 0x000260, r000260);
2877 return 0;
2878}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
deleted file mode 100644
index b701c439c92e..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ /dev/null
@@ -1,223 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_vm.h"
29
/* Private state for the NVC0 instmem engine.
 *
 * Two fixed "channels" are kept alive for the life of the driver, one
 * backing each PCI BAR aperture (BAR1 and BAR3 — see nvc0_instmem_init(),
 * which sizes each VM from pci_resource_len() of the matching BAR).
 * Each channel has its own page directory object.
 */
struct nvc0_instmem_priv {
	struct nouveau_gpuobj *bar1_pgd;	/* page directory for the BAR1 VM */
	struct nouveau_channel *bar1;		/* channel whose ramin programs BAR1 */
	struct nouveau_gpuobj *bar3_pgd;	/* page directory for the BAR3 VM */
	struct nouveau_channel *bar3;		/* channel whose ramin programs BAR3 */
};
36
/* Suspend hook: flag instance memory as unavailable before the device is
 * powered down.  NOTE(review): presumably this gates the instmem accessors
 * elsewhere in the driver — not visible from this file; confirm at callers
 * of ramin_available.  Always returns 0.
 */
int
nvc0_instmem_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	dev_priv->ramin_available = false;
	return 0;
}
45
/* Resume hook: re-point the BAR apertures at the instance memory of the
 * two permanent channels and mark instmem usable again.  Also called at
 * the end of nvc0_instmem_init() to do the initial programming.
 */
void
nvc0_instmem_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;

	/* clear bit 0 of 0x100c80 — NOTE(review): register semantics not
	 * visible here; presumably an MMU/TLB control bit, confirm against
	 * hw docs */
	nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
	/* write each channel's instance-memory address (in 4KiB units,
	 * hence the >> 12) into the BAR1/BAR3 binding registers; the high
	 * bits (0x8/0xc0000000) look like enable/valid flags — confirm */
	nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
	nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
	dev_priv->ramin_available = true;
}
57
58static void
59nvc0_channel_del(struct nouveau_channel **pchan)
60{
61 struct nouveau_channel *chan;
62
63 chan = *pchan;
64 *pchan = NULL;
65 if (!chan)
66 return;
67
68 nouveau_vm_ref(NULL, &chan->vm, NULL);
69 if (drm_mm_initialized(&chan->ramin_heap))
70 drm_mm_takedown(&chan->ramin_heap);
71 nouveau_gpuobj_ref(NULL, &chan->ramin);
72 kfree(chan);
73}
74
75static int
76nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
77 struct nouveau_channel **pchan,
78 struct nouveau_gpuobj *pgd, u64 vm_size)
79{
80 struct nouveau_channel *chan;
81 int ret;
82
83 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
84 if (!chan)
85 return -ENOMEM;
86 chan->dev = dev;
87
88 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
89 if (ret) {
90 nvc0_channel_del(&chan);
91 return ret;
92 }
93
94 ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
95 if (ret) {
96 nvc0_channel_del(&chan);
97 return ret;
98 }
99
100 ret = nouveau_vm_ref(vm, &chan->vm, NULL);
101 if (ret) {
102 nvc0_channel_del(&chan);
103 return ret;
104 }
105
106 nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
107 nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
108 nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
109 nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
110
111 *pchan = chan;
112 return 0;
113}
114
/* Bring up the NVC0 instmem engine.
 *
 * Creates, in order:
 *   1. the BAR3 VM (sized from PCI BAR 3), its single flat page table,
 *      its page directory, and the permanent BAR3 channel;
 *   2. the BAR1 VM (sized from PCI BAR 1), its page directory, and the
 *      permanent BAR1 channel;
 *   3. the shared channel VM (1<<40 bytes of address space).
 * Finally programs the hardware via nvc0_instmem_resume().
 *
 * Any failure jumps to the common error path, which calls
 * nvc0_instmem_takedown() — safe on partially-constructed state.
 * Returns 0 on success or a negative errno.
 */
int
nvc0_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct pci_dev *pdev = dev->pdev;
	struct nvc0_instmem_priv *priv;
	struct nouveau_vm *vm = NULL;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	pinstmem->priv = priv;

	/* BAR3 VM */
	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
			     &dev_priv->bar3_vm);
	if (ret)
		goto error;

	/* one PTE (8 bytes) per 4KiB page of the BAR3 aperture; DONT_MAP
	 * because this page table is mapped by hand just below */
	ret = nouveau_gpuobj_new(dev, NULL,
				 (pci_resource_len(pdev, 3) >> 12) * 8, 0,
				 NVOBJ_FLAG_DONT_MAP |
				 NVOBJ_FLAG_ZERO_ALLOC,
				 &dev_priv->bar3_vm->pgt[0].obj[0]);
	if (ret)
		goto error;
	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;

	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);

	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
	if (ret)
		goto error;

	/* take a temporary reference to attach the page directory, then
	 * drop it — the long-lived reference is dev_priv->bar3_vm */
	ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
	if (ret)
		goto error;
	nouveau_vm_ref(NULL, &vm, NULL);

	ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
			       priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
	if (ret)
		goto error;

	/* BAR1 VM */
	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
	if (ret)
		goto error;

	/* transfer ownership of the new VM into dev_priv->bar1_vm while
	 * attaching the page directory, then drop the local reference */
	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
	if (ret)
		goto error;
	nouveau_vm_ref(NULL, &vm, NULL);

	ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
			       priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
	if (ret)
		goto error;

	/* channel vm */
	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
			     &dev_priv->chan_vm);
	if (ret)
		goto error;

	/* program the hardware with the channels created above */
	nvc0_instmem_resume(dev);
	return 0;
error:
	nvc0_instmem_takedown(dev);
	return ret;
}
194
/* Tear down everything created by nvc0_instmem_init(), in reverse order.
 * Must tolerate partially-constructed state, since init's error path
 * lands here.  Disables the BAR apertures in hardware before freeing the
 * objects that back them.
 */
void
nvc0_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_vm *vm = NULL;

	/* mark instmem unavailable first so nothing touches it mid-teardown */
	nvc0_instmem_suspend(dev);

	/* unbind the BAR1/BAR3 apertures programmed by _resume() */
	nv_wr32(dev, 0x1704, 0x00000000);
	nv_wr32(dev, 0x1714, 0x00000000);

	nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);

	nvc0_channel_del(&priv->bar1);
	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);

	nvc0_channel_del(&priv->bar3);
	/* BAR3: take a temporary reference so the page directory can be
	 * detached before the final reference (and the hand-created page
	 * table) are dropped — mirrors the two-step attach in init */
	nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
	nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
	nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

	dev_priv->engine.instmem.priv = NULL;
	kfree(priv);
}
223
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 4e712b10ebdb..0d34eb581179 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -22,18 +22,24 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "drmP.h" 25#include "nouveau_drm.h"
26#include "nouveau_drv.h"
27#include "nouveau_bios.h" 26#include "nouveau_bios.h"
28#include "nouveau_pm.h" 27#include "nouveau_pm.h"
29 28
29#include <subdev/bios/pll.h>
30#include <subdev/bios.h>
31#include <subdev/clock.h>
32#include <subdev/timer.h>
33#include <subdev/fb.h>
34
30static u32 read_div(struct drm_device *, int, u32, u32); 35static u32 read_div(struct drm_device *, int, u32, u32);
31static u32 read_pll(struct drm_device *, u32); 36static u32 read_pll(struct drm_device *, u32);
32 37
33static u32 38static u32
34read_vco(struct drm_device *dev, u32 dsrc) 39read_vco(struct drm_device *dev, u32 dsrc)
35{ 40{
36 u32 ssrc = nv_rd32(dev, dsrc); 41 struct nouveau_device *device = nouveau_dev(dev);
42 u32 ssrc = nv_rd32(device, dsrc);
37 if (!(ssrc & 0x00000100)) 43 if (!(ssrc & 0x00000100))
38 return read_pll(dev, 0x00e800); 44 return read_pll(dev, 0x00e800);
39 return read_pll(dev, 0x00e820); 45 return read_pll(dev, 0x00e820);
@@ -42,8 +48,9 @@ read_vco(struct drm_device *dev, u32 dsrc)
42static u32 48static u32
43read_pll(struct drm_device *dev, u32 pll) 49read_pll(struct drm_device *dev, u32 pll)
44{ 50{
45 u32 ctrl = nv_rd32(dev, pll + 0); 51 struct nouveau_device *device = nouveau_dev(dev);
46 u32 coef = nv_rd32(dev, pll + 4); 52 u32 ctrl = nv_rd32(device, pll + 0);
53 u32 coef = nv_rd32(device, pll + 4);
47 u32 P = (coef & 0x003f0000) >> 16; 54 u32 P = (coef & 0x003f0000) >> 16;
48 u32 N = (coef & 0x0000ff00) >> 8; 55 u32 N = (coef & 0x0000ff00) >> 8;
49 u32 M = (coef & 0x000000ff) >> 0; 56 u32 M = (coef & 0x000000ff) >> 0;
@@ -83,8 +90,9 @@ read_pll(struct drm_device *dev, u32 pll)
83static u32 90static u32
84read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl) 91read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
85{ 92{
86 u32 ssrc = nv_rd32(dev, dsrc + (doff * 4)); 93 struct nouveau_device *device = nouveau_dev(dev);
87 u32 sctl = nv_rd32(dev, dctl + (doff * 4)); 94 u32 ssrc = nv_rd32(device, dsrc + (doff * 4));
95 u32 sctl = nv_rd32(device, dctl + (doff * 4));
88 96
89 switch (ssrc & 0x00000003) { 97 switch (ssrc & 0x00000003) {
90 case 0: 98 case 0:
@@ -109,7 +117,8 @@ read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
109static u32 117static u32
110read_mem(struct drm_device *dev) 118read_mem(struct drm_device *dev)
111{ 119{
112 u32 ssel = nv_rd32(dev, 0x1373f0); 120 struct nouveau_device *device = nouveau_dev(dev);
121 u32 ssel = nv_rd32(device, 0x1373f0);
113 if (ssel & 0x00000001) 122 if (ssel & 0x00000001)
114 return read_div(dev, 0, 0x137300, 0x137310); 123 return read_div(dev, 0, 0x137300, 0x137310);
115 return read_pll(dev, 0x132000); 124 return read_pll(dev, 0x132000);
@@ -118,8 +127,9 @@ read_mem(struct drm_device *dev)
118static u32 127static u32
119read_clk(struct drm_device *dev, int clk) 128read_clk(struct drm_device *dev, int clk)
120{ 129{
121 u32 sctl = nv_rd32(dev, 0x137250 + (clk * 4)); 130 struct nouveau_device *device = nouveau_dev(dev);
122 u32 ssel = nv_rd32(dev, 0x137100); 131 u32 sctl = nv_rd32(device, 0x137250 + (clk * 4));
132 u32 ssel = nv_rd32(device, 0x137100);
123 u32 sclk, sdiv; 133 u32 sclk, sdiv;
124 134
125 if (ssel & (1 << clk)) { 135 if (ssel & (1 << clk)) {
@@ -212,10 +222,12 @@ calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
212static u32 222static u32
213calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef) 223calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
214{ 224{
215 struct pll_lims limits; 225 struct nouveau_device *device = nouveau_dev(dev);
226 struct nouveau_bios *bios = nouveau_bios(device);
227 struct nvbios_pll limits;
216 int N, M, P, ret; 228 int N, M, P, ret;
217 229
218 ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits); 230 ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
219 if (ret) 231 if (ret)
220 return 0; 232 return 0;
221 233
@@ -308,31 +320,33 @@ calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
308static int 320static int
309calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq) 321calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
310{ 322{
311 struct pll_lims pll; 323 struct nouveau_device *device = nouveau_dev(dev);
324 struct nouveau_bios *bios = nouveau_bios(device);
325 struct nvbios_pll pll;
312 int N, M, P, ret; 326 int N, M, P, ret;
313 u32 ctrl; 327 u32 ctrl;
314 328
315 /* mclk pll input freq comes from another pll, make sure it's on */ 329 /* mclk pll input freq comes from another pll, make sure it's on */
316 ctrl = nv_rd32(dev, 0x132020); 330 ctrl = nv_rd32(device, 0x132020);
317 if (!(ctrl & 0x00000001)) { 331 if (!(ctrl & 0x00000001)) {
318 /* if not, program it to 567MHz. nfi where this value comes 332 /* if not, program it to 567MHz. nfi where this value comes
319 * from - it looks like it's in the pll limits table for 333 * from - it looks like it's in the pll limits table for
320 * 132000 but the binary driver ignores all my attempts to 334 * 132000 but the binary driver ignores all my attempts to
321 * change this value. 335 * change this value.
322 */ 336 */
323 nv_wr32(dev, 0x137320, 0x00000103); 337 nv_wr32(device, 0x137320, 0x00000103);
324 nv_wr32(dev, 0x137330, 0x81200606); 338 nv_wr32(device, 0x137330, 0x81200606);
325 nv_wait(dev, 0x132020, 0x00010000, 0x00010000); 339 nv_wait(device, 0x132020, 0x00010000, 0x00010000);
326 nv_wr32(dev, 0x132024, 0x0001150f); 340 nv_wr32(device, 0x132024, 0x0001150f);
327 nv_mask(dev, 0x132020, 0x00000001, 0x00000001); 341 nv_mask(device, 0x132020, 0x00000001, 0x00000001);
328 nv_wait(dev, 0x137390, 0x00020000, 0x00020000); 342 nv_wait(device, 0x137390, 0x00020000, 0x00020000);
329 nv_mask(dev, 0x132020, 0x00000004, 0x00000004); 343 nv_mask(device, 0x132020, 0x00000004, 0x00000004);
330 } 344 }
331 345
332 /* for the moment, until the clock tree is better understood, use 346 /* for the moment, until the clock tree is better understood, use
333 * pll mode for all clock frequencies 347 * pll mode for all clock frequencies
334 */ 348 */
335 ret = get_pll_limits(dev, 0x132000, &pll); 349 ret = nvbios_pll_parse(bios, 0x132000, &pll);
336 if (ret == 0) { 350 if (ret == 0) {
337 pll.refclk = read_pll(dev, 0x132020); 351 pll.refclk = read_pll(dev, 0x132020);
338 if (pll.refclk) { 352 if (pll.refclk) {
@@ -350,7 +364,7 @@ calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
350void * 364void *
351nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) 365nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
352{ 366{
353 struct drm_nouveau_private *dev_priv = dev->dev_private; 367 struct nouveau_device *device = nouveau_dev(dev);
354 struct nvc0_pm_state *info; 368 struct nvc0_pm_state *info;
355 int ret; 369 int ret;
356 370
@@ -364,7 +378,7 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
364 * are always the same freq with the binary driver even when the 378 * are always the same freq with the binary driver even when the
365 * performance table says they should differ. 379 * performance table says they should differ.
366 */ 380 */
367 if (dev_priv->chipset == 0xd9) 381 if (device->chipset == 0xd9)
368 perflvl->rop = 0; 382 perflvl->rop = 0;
369 383
370 if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) || 384 if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
@@ -394,38 +408,40 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
394static void 408static void
395prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info) 409prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
396{ 410{
411 struct nouveau_device *device = nouveau_dev(dev);
412
397 /* program dividers at 137160/1371d0 first */ 413 /* program dividers at 137160/1371d0 first */
398 if (clk < 7 && !info->ssel) { 414 if (clk < 7 && !info->ssel) {
399 nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv); 415 nv_mask(device, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
400 nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc); 416 nv_wr32(device, 0x137160 + (clk * 0x04), info->dsrc);
401 } 417 }
402 418
403 /* switch clock to non-pll mode */ 419 /* switch clock to non-pll mode */
404 nv_mask(dev, 0x137100, (1 << clk), 0x00000000); 420 nv_mask(device, 0x137100, (1 << clk), 0x00000000);
405 nv_wait(dev, 0x137100, (1 << clk), 0x00000000); 421 nv_wait(device, 0x137100, (1 << clk), 0x00000000);
406 422
407 /* reprogram pll */ 423 /* reprogram pll */
408 if (clk < 7) { 424 if (clk < 7) {
409 /* make sure it's disabled first... */ 425 /* make sure it's disabled first... */
410 u32 base = 0x137000 + (clk * 0x20); 426 u32 base = 0x137000 + (clk * 0x20);
411 u32 ctrl = nv_rd32(dev, base + 0x00); 427 u32 ctrl = nv_rd32(device, base + 0x00);
412 if (ctrl & 0x00000001) { 428 if (ctrl & 0x00000001) {
413 nv_mask(dev, base + 0x00, 0x00000004, 0x00000000); 429 nv_mask(device, base + 0x00, 0x00000004, 0x00000000);
414 nv_mask(dev, base + 0x00, 0x00000001, 0x00000000); 430 nv_mask(device, base + 0x00, 0x00000001, 0x00000000);
415 } 431 }
416 /* program it to new values, if necessary */ 432 /* program it to new values, if necessary */
417 if (info->ssel) { 433 if (info->ssel) {
418 nv_wr32(dev, base + 0x04, info->coef); 434 nv_wr32(device, base + 0x04, info->coef);
419 nv_mask(dev, base + 0x00, 0x00000001, 0x00000001); 435 nv_mask(device, base + 0x00, 0x00000001, 0x00000001);
420 nv_wait(dev, base + 0x00, 0x00020000, 0x00020000); 436 nv_wait(device, base + 0x00, 0x00020000, 0x00020000);
421 nv_mask(dev, base + 0x00, 0x00020004, 0x00000004); 437 nv_mask(device, base + 0x00, 0x00020004, 0x00000004);
422 } 438 }
423 } 439 }
424 440
425 /* select pll/non-pll mode, and program final clock divider */ 441 /* select pll/non-pll mode, and program final clock divider */
426 nv_mask(dev, 0x137100, (1 << clk), info->ssel); 442 nv_mask(device, 0x137100, (1 << clk), info->ssel);
427 nv_wait(dev, 0x137100, (1 << clk), info->ssel); 443 nv_wait(device, 0x137100, (1 << clk), info->ssel);
428 nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv); 444 nv_mask(device, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
429} 445}
430 446
431static void 447static void
@@ -441,7 +457,8 @@ mclk_refresh(struct nouveau_mem_exec_func *exec)
441static void 457static void
442mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable) 458mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
443{ 459{
444 nv_wr32(exec->dev, 0x10f210, enable ? 0x80000000 : 0x00000000); 460 struct nouveau_device *device = nouveau_dev(exec->dev);
461 nv_wr32(device, 0x10f210, enable ? 0x80000000 : 0x00000000);
445} 462}
446 463
447static void 464static void
@@ -458,83 +475,84 @@ mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
458static u32 475static u32
459mclk_mrg(struct nouveau_mem_exec_func *exec, int mr) 476mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
460{ 477{
461 struct drm_device *dev = exec->dev; 478 struct nouveau_device *device = nouveau_dev(exec->dev);
462 struct drm_nouveau_private *dev_priv = dev->dev_private; 479 struct nouveau_fb *pfb = nouveau_fb(device);
463 if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) { 480 if (pfb->ram.type != NV_MEM_TYPE_GDDR5) {
464 if (mr <= 1) 481 if (mr <= 1)
465 return nv_rd32(dev, 0x10f300 + ((mr - 0) * 4)); 482 return nv_rd32(device, 0x10f300 + ((mr - 0) * 4));
466 return nv_rd32(dev, 0x10f320 + ((mr - 2) * 4)); 483 return nv_rd32(device, 0x10f320 + ((mr - 2) * 4));
467 } else { 484 } else {
468 if (mr == 0) 485 if (mr == 0)
469 return nv_rd32(dev, 0x10f300 + (mr * 4)); 486 return nv_rd32(device, 0x10f300 + (mr * 4));
470 else 487 else
471 if (mr <= 7) 488 if (mr <= 7)
472 return nv_rd32(dev, 0x10f32c + (mr * 4)); 489 return nv_rd32(device, 0x10f32c + (mr * 4));
473 return nv_rd32(dev, 0x10f34c); 490 return nv_rd32(device, 0x10f34c);
474 } 491 }
475} 492}
476 493
477static void 494static void
478mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data) 495mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
479{ 496{
480 struct drm_device *dev = exec->dev; 497 struct nouveau_device *device = nouveau_dev(exec->dev);
481 struct drm_nouveau_private *dev_priv = dev->dev_private; 498 struct nouveau_fb *pfb = nouveau_fb(device);
482 if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) { 499 if (pfb->ram.type != NV_MEM_TYPE_GDDR5) {
483 if (mr <= 1) { 500 if (mr <= 1) {
484 nv_wr32(dev, 0x10f300 + ((mr - 0) * 4), data); 501 nv_wr32(device, 0x10f300 + ((mr - 0) * 4), data);
485 if (dev_priv->vram_rank_B) 502 if (pfb->ram.ranks > 1)
486 nv_wr32(dev, 0x10f308 + ((mr - 0) * 4), data); 503 nv_wr32(device, 0x10f308 + ((mr - 0) * 4), data);
487 } else 504 } else
488 if (mr <= 3) { 505 if (mr <= 3) {
489 nv_wr32(dev, 0x10f320 + ((mr - 2) * 4), data); 506 nv_wr32(device, 0x10f320 + ((mr - 2) * 4), data);
490 if (dev_priv->vram_rank_B) 507 if (pfb->ram.ranks > 1)
491 nv_wr32(dev, 0x10f328 + ((mr - 2) * 4), data); 508 nv_wr32(device, 0x10f328 + ((mr - 2) * 4), data);
492 } 509 }
493 } else { 510 } else {
494 if (mr == 0) nv_wr32(dev, 0x10f300 + (mr * 4), data); 511 if (mr == 0) nv_wr32(device, 0x10f300 + (mr * 4), data);
495 else if (mr <= 7) nv_wr32(dev, 0x10f32c + (mr * 4), data); 512 else if (mr <= 7) nv_wr32(device, 0x10f32c + (mr * 4), data);
496 else if (mr == 15) nv_wr32(dev, 0x10f34c, data); 513 else if (mr == 15) nv_wr32(device, 0x10f34c, data);
497 } 514 }
498} 515}
499 516
500static void 517static void
501mclk_clock_set(struct nouveau_mem_exec_func *exec) 518mclk_clock_set(struct nouveau_mem_exec_func *exec)
502{ 519{
520 struct nouveau_device *device = nouveau_dev(exec->dev);
503 struct nvc0_pm_state *info = exec->priv; 521 struct nvc0_pm_state *info = exec->priv;
504 struct drm_device *dev = exec->dev; 522 u32 ctrl = nv_rd32(device, 0x132000);
505 u32 ctrl = nv_rd32(dev, 0x132000);
506 523
507 nv_wr32(dev, 0x137360, 0x00000001); 524 nv_wr32(device, 0x137360, 0x00000001);
508 nv_wr32(dev, 0x137370, 0x00000000); 525 nv_wr32(device, 0x137370, 0x00000000);
509 nv_wr32(dev, 0x137380, 0x00000000); 526 nv_wr32(device, 0x137380, 0x00000000);
510 if (ctrl & 0x00000001) 527 if (ctrl & 0x00000001)
511 nv_wr32(dev, 0x132000, (ctrl &= ~0x00000001)); 528 nv_wr32(device, 0x132000, (ctrl &= ~0x00000001));
512 529
513 nv_wr32(dev, 0x132004, info->mem.coef); 530 nv_wr32(device, 0x132004, info->mem.coef);
514 nv_wr32(dev, 0x132000, (ctrl |= 0x00000001)); 531 nv_wr32(device, 0x132000, (ctrl |= 0x00000001));
515 nv_wait(dev, 0x137390, 0x00000002, 0x00000002); 532 nv_wait(device, 0x137390, 0x00000002, 0x00000002);
516 nv_wr32(dev, 0x132018, 0x00005000); 533 nv_wr32(device, 0x132018, 0x00005000);
517 534
518 nv_wr32(dev, 0x137370, 0x00000001); 535 nv_wr32(device, 0x137370, 0x00000001);
519 nv_wr32(dev, 0x137380, 0x00000001); 536 nv_wr32(device, 0x137380, 0x00000001);
520 nv_wr32(dev, 0x137360, 0x00000000); 537 nv_wr32(device, 0x137360, 0x00000000);
521} 538}
522 539
523static void 540static void
524mclk_timing_set(struct nouveau_mem_exec_func *exec) 541mclk_timing_set(struct nouveau_mem_exec_func *exec)
525{ 542{
543 struct nouveau_device *device = nouveau_dev(exec->dev);
526 struct nvc0_pm_state *info = exec->priv; 544 struct nvc0_pm_state *info = exec->priv;
527 struct nouveau_pm_level *perflvl = info->perflvl; 545 struct nouveau_pm_level *perflvl = info->perflvl;
528 int i; 546 int i;
529 547
530 for (i = 0; i < 5; i++) 548 for (i = 0; i < 5; i++)
531 nv_wr32(exec->dev, 0x10f290 + (i * 4), perflvl->timing.reg[i]); 549 nv_wr32(device, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
532} 550}
533 551
534static void 552static void
535prog_mem(struct drm_device *dev, struct nvc0_pm_state *info) 553prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
536{ 554{
537 struct drm_nouveau_private *dev_priv = dev->dev_private; 555 struct nouveau_device *device = nouveau_dev(dev);
538 struct nouveau_mem_exec_func exec = { 556 struct nouveau_mem_exec_func exec = {
539 .dev = dev, 557 .dev = dev,
540 .precharge = mclk_precharge, 558 .precharge = mclk_precharge,
@@ -549,17 +567,17 @@ prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
549 .priv = info 567 .priv = info
550 }; 568 };
551 569
552 if (dev_priv->chipset < 0xd0) 570 if (device->chipset < 0xd0)
553 nv_wr32(dev, 0x611200, 0x00003300); 571 nv_wr32(device, 0x611200, 0x00003300);
554 else 572 else
555 nv_wr32(dev, 0x62c000, 0x03030000); 573 nv_wr32(device, 0x62c000, 0x03030000);
556 574
557 nouveau_mem_exec(&exec, info->perflvl); 575 nouveau_mem_exec(&exec, info->perflvl);
558 576
559 if (dev_priv->chipset < 0xd0) 577 if (device->chipset < 0xd0)
560 nv_wr32(dev, 0x611200, 0x00003330); 578 nv_wr32(device, 0x611200, 0x00003330);
561 else 579 else
562 nv_wr32(dev, 0x62c000, 0x03030300); 580 nv_wr32(device, 0x62c000, 0x03030300);
563} 581}
564int 582int
565nvc0_pm_clocks_set(struct drm_device *dev, void *data) 583nvc0_pm_clocks_set(struct drm_device *dev, void *data)
diff --git a/drivers/gpu/drm/nouveau/nvc0_software.c b/drivers/gpu/drm/nouveau/nvc0_software.c
deleted file mode 100644
index 93e8c164fec6..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_software.c
+++ /dev/null
@@ -1,153 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29#include "nouveau_software.h"
30
31#include "nv50_display.h"
32
33struct nvc0_software_priv {
34 struct nouveau_software_priv base;
35};
36
37struct nvc0_software_chan {
38 struct nouveau_software_chan base;
39 struct nouveau_vma dispc_vma[4];
40};
41
42u64
43nvc0_software_crtc(struct nouveau_channel *chan, int crtc)
44{
45 struct nvc0_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
46 return pch->dispc_vma[crtc].offset;
47}
48
49static int
50nvc0_software_context_new(struct nouveau_channel *chan, int engine)
51{
52 struct drm_device *dev = chan->dev;
53 struct drm_nouveau_private *dev_priv = dev->dev_private;
54 struct nvc0_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
55 struct nvc0_software_chan *pch;
56 int ret = 0, i;
57
58 pch = kzalloc(sizeof(*pch), GFP_KERNEL);
59 if (!pch)
60 return -ENOMEM;
61
62 nouveau_software_context_new(&pch->base);
63 chan->engctx[engine] = pch;
64
65 /* map display semaphore buffers into channel's vm */
66 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
67 struct nouveau_bo *bo;
68 if (dev_priv->card_type >= NV_D0)
69 bo = nvd0_display_crtc_sema(dev, i);
70 else
71 bo = nv50_display(dev)->crtc[i].sem.bo;
72
73 ret = nouveau_bo_vma_add(bo, chan->vm, &pch->dispc_vma[i]);
74 }
75
76 if (ret)
77 psw->base.base.context_del(chan, engine);
78 return ret;
79}
80
81static void
82nvc0_software_context_del(struct nouveau_channel *chan, int engine)
83{
84 struct drm_device *dev = chan->dev;
85 struct drm_nouveau_private *dev_priv = dev->dev_private;
86 struct nvc0_software_chan *pch = chan->engctx[engine];
87 int i;
88
89 if (dev_priv->card_type >= NV_D0) {
90 for (i = 0; i < dev->mode_config.num_crtc; i++) {
91 struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
92 nouveau_bo_vma_del(bo, &pch->dispc_vma[i]);
93 }
94 } else
95 if (dev_priv->card_type >= NV_50) {
96 struct nv50_display *disp = nv50_display(dev);
97 for (i = 0; i < dev->mode_config.num_crtc; i++) {
98 struct nv50_display_crtc *dispc = &disp->crtc[i];
99 nouveau_bo_vma_del(dispc->sem.bo, &pch->dispc_vma[i]);
100 }
101 }
102
103 chan->engctx[engine] = NULL;
104 kfree(pch);
105}
106
107static int
108nvc0_software_object_new(struct nouveau_channel *chan, int engine,
109 u32 handle, u16 class)
110{
111 return 0;
112}
113
114static int
115nvc0_software_init(struct drm_device *dev, int engine)
116{
117 return 0;
118}
119
120static int
121nvc0_software_fini(struct drm_device *dev, int engine, bool suspend)
122{
123 return 0;
124}
125
126static void
127nvc0_software_destroy(struct drm_device *dev, int engine)
128{
129 struct nvc0_software_priv *psw = nv_engine(dev, engine);
130
131 NVOBJ_ENGINE_DEL(dev, SW);
132 kfree(psw);
133}
134
135int
136nvc0_software_create(struct drm_device *dev)
137{
138 struct nvc0_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
139 if (!psw)
140 return -ENOMEM;
141
142 psw->base.base.destroy = nvc0_software_destroy;
143 psw->base.base.init = nvc0_software_init;
144 psw->base.base.fini = nvc0_software_fini;
145 psw->base.base.context_new = nvc0_software_context_new;
146 psw->base.base.context_del = nvc0_software_context_del;
147 psw->base.base.object_new = nvc0_software_object_new;
148 nouveau_software_create(&psw->base);
149
150 NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
151 NVOBJ_CLASS(dev, 0x906e, SW);
152 return 0;
153}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
deleted file mode 100644
index a7eef8934c07..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ /dev/null
@@ -1,160 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28
29/* 0 = unsupported
30 * 1 = non-compressed
31 * 3 = compressed
32 */
33static const u8 types[256] = {
34 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
35 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
36 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
37 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
38 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
39 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
40 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
41 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
42 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
46 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
50};
51
52bool
53nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
54{
55 u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
56 return likely((types[memtype] == 1));
57}
58
59int
60nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
61 u32 type, struct nouveau_mem **pmem)
62{
63 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
65 struct nouveau_mm_node *r;
66 struct nouveau_mem *mem;
67 int ret;
68
69 size >>= 12;
70 align >>= 12;
71 ncmin >>= 12;
72
73 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
74 if (!mem)
75 return -ENOMEM;
76
77 INIT_LIST_HEAD(&mem->regions);
78 mem->dev = dev_priv->dev;
79 mem->memtype = (type & 0xff);
80 mem->size = size;
81
82 mutex_lock(&mm->mutex);
83 do {
84 ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
85 if (ret) {
86 mutex_unlock(&mm->mutex);
87 nv50_vram_del(dev, &mem);
88 return ret;
89 }
90
91 list_add_tail(&r->rl_entry, &mem->regions);
92 size -= r->length;
93 } while (size);
94 mutex_unlock(&mm->mutex);
95
96 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
97 mem->offset = (u64)r->offset << 12;
98 *pmem = mem;
99 return 0;
100}
101
102int
103nvc0_vram_init(struct drm_device *dev)
104{
105 struct drm_nouveau_private *dev_priv = dev->dev_private;
106 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
107 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
108 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
109 u32 parts = nv_rd32(dev, 0x022438);
110 u32 pmask = nv_rd32(dev, 0x022554);
111 u32 bsize = nv_rd32(dev, 0x10f20c);
112 u32 offset, length;
113 bool uniform = true;
114 int ret, part;
115
116 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
117 NV_DEBUG(dev, "parts 0x%08x mask 0x%08x\n", parts, pmask);
118
119 dev_priv->vram_type = nouveau_mem_vbios_type(dev);
120 dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x10f200) & 0x00000004);
121
122 /* read amount of vram attached to each memory controller */
123 for (part = 0; part < parts; part++) {
124 if (!(pmask & (1 << part))) {
125 u32 psize = nv_rd32(dev, 0x11020c + (part * 0x1000));
126 if (psize != bsize) {
127 if (psize < bsize)
128 bsize = psize;
129 uniform = false;
130 }
131
132 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
133 dev_priv->vram_size += (u64)psize << 20;
134 }
135 }
136
137 /* if all controllers have the same amount attached, there's no holes */
138 if (uniform) {
139 offset = rsvd_head;
140 length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
141 return nouveau_mm_init(&vram->mm, offset, length, 1);
142 }
143
144 /* otherwise, address lowest common amount from 0GiB */
145 ret = nouveau_mm_init(&vram->mm, rsvd_head, (bsize << 8) * parts, 1);
146 if (ret)
147 return ret;
148
149 /* and the rest starting from (8GiB + common_size) */
150 offset = (0x0200000000ULL >> 12) + (bsize << 8);
151 length = (dev_priv->vram_size >> 12) - (bsize << 8) - rsvd_tail;
152
153 ret = nouveau_mm_init(&vram->mm, offset, length, 0);
154 if (ret) {
155 nouveau_mm_fini(&vram->mm);
156 return ret;
157 }
158
159 return 0;
160}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 8a2fc89b7763..37037bc33266 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -27,15 +27,21 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm_crtc_helper.h" 28#include "drm_crtc_helper.h"
29 29
30#include "nouveau_drv.h" 30#include "nouveau_drm.h"
31#include "nouveau_dma.h"
32#include "nouveau_gem.h"
31#include "nouveau_connector.h" 33#include "nouveau_connector.h"
32#include "nouveau_encoder.h" 34#include "nouveau_encoder.h"
33#include "nouveau_crtc.h" 35#include "nouveau_crtc.h"
34#include "nouveau_dma.h" 36#include "nouveau_fence.h"
35#include "nouveau_fb.h"
36#include "nouveau_software.h"
37#include "nv50_display.h" 37#include "nv50_display.h"
38 38
39#include <core/gpuobj.h>
40
41#include <subdev/timer.h>
42#include <subdev/bar.h>
43#include <subdev/fb.h>
44
39#define EVO_DMA_NR 9 45#define EVO_DMA_NR 9
40 46
41#define EVO_MASTER (0x00) 47#define EVO_MASTER (0x00)
@@ -72,8 +78,7 @@ struct nvd0_display {
72static struct nvd0_display * 78static struct nvd0_display *
73nvd0_display(struct drm_device *dev) 79nvd0_display(struct drm_device *dev)
74{ 80{
75 struct drm_nouveau_private *dev_priv = dev->dev_private; 81 return nouveau_display(dev)->priv;
76 return dev_priv->engine.display.priv;
77} 82}
78 83
79static struct drm_crtc * 84static struct drm_crtc *
@@ -88,55 +93,47 @@ nvd0_display_crtc_get(struct drm_encoder *encoder)
88static inline int 93static inline int
89evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data) 94evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
90{ 95{
96 struct nouveau_device *device = nouveau_dev(dev);
91 int ret = 0; 97 int ret = 0;
92 nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001); 98 nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
93 nv_wr32(dev, 0x610704 + (id * 0x10), data); 99 nv_wr32(device, 0x610704 + (id * 0x10), data);
94 nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd); 100 nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
95 if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000)) 101 if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
96 ret = -EBUSY; 102 ret = -EBUSY;
97 nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000); 103 nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
98 return ret; 104 return ret;
99} 105}
100 106
101static u32 * 107static u32 *
102evo_wait(struct drm_device *dev, int id, int nr) 108evo_wait(struct drm_device *dev, int id, int nr)
103{ 109{
110 struct nouveau_device *device = nouveau_dev(dev);
111 struct nouveau_drm *drm = nouveau_drm(dev);
104 struct nvd0_display *disp = nvd0_display(dev); 112 struct nvd0_display *disp = nvd0_display(dev);
105 u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4; 113 u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
106 114
107 if (put + nr >= (PAGE_SIZE / 4)) { 115 if (put + nr >= (PAGE_SIZE / 4)) {
108 disp->evo[id].ptr[put] = 0x20000000; 116 disp->evo[id].ptr[put] = 0x20000000;
109 117
110 nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000); 118 nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
111 if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) { 119 if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
112 NV_ERROR(dev, "evo %d dma stalled\n", id); 120 NV_ERROR(drm, "evo %d dma stalled\n", id);
113 return NULL; 121 return NULL;
114 } 122 }
115 123
116 put = 0; 124 put = 0;
117 } 125 }
118 126
119 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
120 NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
121
122 return disp->evo[id].ptr + put; 127 return disp->evo[id].ptr + put;
123} 128}
124 129
125static void 130static void
126evo_kick(u32 *push, struct drm_device *dev, int id) 131evo_kick(u32 *push, struct drm_device *dev, int id)
127{ 132{
133 struct nouveau_device *device = nouveau_dev(dev);
128 struct nvd0_display *disp = nvd0_display(dev); 134 struct nvd0_display *disp = nvd0_display(dev);
129 135
130 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) { 136 nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
131 u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
132 u32 *cur = disp->evo[id].ptr + curp;
133
134 while (cur < push)
135 NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
136 NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
137 }
138
139 nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
140} 137}
141 138
142#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m)) 139#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
@@ -145,6 +142,8 @@ evo_kick(u32 *push, struct drm_device *dev, int id)
145static int 142static int
146evo_init_dma(struct drm_device *dev, int ch) 143evo_init_dma(struct drm_device *dev, int ch)
147{ 144{
145 struct nouveau_device *device = nouveau_dev(dev);
146 struct nouveau_drm *drm = nouveau_drm(dev);
148 struct nvd0_display *disp = nvd0_display(dev); 147 struct nvd0_display *disp = nvd0_display(dev);
149 u32 flags; 148 u32 flags;
150 149
@@ -152,68 +151,76 @@ evo_init_dma(struct drm_device *dev, int ch)
152 if (ch == EVO_MASTER) 151 if (ch == EVO_MASTER)
153 flags |= 0x01000000; 152 flags |= 0x01000000;
154 153
155 nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3); 154 nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
156 nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000); 155 nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
157 nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001); 156 nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
158 nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010); 157 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
159 nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000); 158 nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
160 nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags); 159 nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
161 if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) { 160 if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
162 NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch, 161 NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
163 nv_rd32(dev, 0x610490 + (ch * 0x0010))); 162 nv_rd32(device, 0x610490 + (ch * 0x0010)));
164 return -EBUSY; 163 return -EBUSY;
165 } 164 }
166 165
167 nv_mask(dev, 0x610090, (1 << ch), (1 << ch)); 166 nv_mask(device, 0x610090, (1 << ch), (1 << ch));
168 nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch)); 167 nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
169 return 0; 168 return 0;
170} 169}
171 170
172static void 171static void
173evo_fini_dma(struct drm_device *dev, int ch) 172evo_fini_dma(struct drm_device *dev, int ch)
174{ 173{
175 if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010)) 174 struct nouveau_device *device = nouveau_dev(dev);
175
176 if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
176 return; 177 return;
177 178
178 nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000); 179 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
179 nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000); 180 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
180 nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000); 181 nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
181 nv_mask(dev, 0x610090, (1 << ch), 0x00000000); 182 nv_mask(device, 0x610090, (1 << ch), 0x00000000);
182 nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000); 183 nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
183} 184}
184 185
185static inline void 186static inline void
186evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data) 187evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
187{ 188{
188 nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data); 189 struct nouveau_device *device = nouveau_dev(dev);
190 nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
189} 191}
190 192
191static int 193static int
192evo_init_pio(struct drm_device *dev, int ch) 194evo_init_pio(struct drm_device *dev, int ch)
193{ 195{
194 nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001); 196 struct nouveau_device *device = nouveau_dev(dev);
195 if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) { 197 struct nouveau_drm *drm = nouveau_drm(dev);
196 NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch, 198
197 nv_rd32(dev, 0x610490 + (ch * 0x0010))); 199 nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
200 if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
201 NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
202 nv_rd32(device, 0x610490 + (ch * 0x0010)));
198 return -EBUSY; 203 return -EBUSY;
199 } 204 }
200 205
201 nv_mask(dev, 0x610090, (1 << ch), (1 << ch)); 206 nv_mask(device, 0x610090, (1 << ch), (1 << ch));
202 nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch)); 207 nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
203 return 0; 208 return 0;
204} 209}
205 210
206static void 211static void
207evo_fini_pio(struct drm_device *dev, int ch) 212evo_fini_pio(struct drm_device *dev, int ch)
208{ 213{
209 if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001)) 214 struct nouveau_device *device = nouveau_dev(dev);
215
216 if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
210 return; 217 return;
211 218
212 nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010); 219 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
213 nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000); 220 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
214 nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000); 221 nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
215 nv_mask(dev, 0x610090, (1 << ch), 0x00000000); 222 nv_mask(device, 0x610090, (1 << ch), 0x00000000);
216 nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000); 223 nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
217} 224}
218 225
219static bool 226static bool
@@ -225,6 +232,7 @@ evo_sync_wait(void *data)
225static int 232static int
226evo_sync(struct drm_device *dev, int ch) 233evo_sync(struct drm_device *dev, int ch)
227{ 234{
235 struct nouveau_device *device = nouveau_dev(dev);
228 struct nvd0_display *disp = nvd0_display(dev); 236 struct nvd0_display *disp = nvd0_display(dev);
229 u32 *push = evo_wait(dev, ch, 8); 237 u32 *push = evo_wait(dev, ch, 8);
230 if (push) { 238 if (push) {
@@ -235,7 +243,7 @@ evo_sync(struct drm_device *dev, int ch)
235 evo_data(push, 0x00000000); 243 evo_data(push, 0x00000000);
236 evo_data(push, 0x00000000); 244 evo_data(push, 0x00000000);
237 evo_kick(push, dev, ch); 245 evo_kick(push, dev, ch);
238 if (nv_wait_cb(dev, evo_sync_wait, disp->sync)) 246 if (nv_wait_cb(device, evo_sync_wait, disp->sync))
239 return 0; 247 return 0;
240 } 248 }
241 249
@@ -300,7 +308,7 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
300 return ret; 308 return ret;
301 309
302 310
303 offset = nvc0_software_crtc(chan, nv_crtc->index); 311 offset = nvc0_fence_crtc(chan, nv_crtc->index);
304 offset += evo->sem.offset; 312 offset += evo->sem.offset;
305 313
306 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 314 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
@@ -363,7 +371,7 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
363static int 371static int
364nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) 372nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
365{ 373{
366 struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private; 374 struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
367 struct drm_device *dev = nv_crtc->base.dev; 375 struct drm_device *dev = nv_crtc->base.dev;
368 struct nouveau_connector *nv_connector; 376 struct nouveau_connector *nv_connector;
369 struct drm_connector *connector; 377 struct drm_connector *connector;
@@ -386,7 +394,7 @@ nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
386 mode |= nv_connector->dithering_depth; 394 mode |= nv_connector->dithering_depth;
387 } 395 }
388 396
389 if (dev_priv->card_type < NV_E0) 397 if (nv_device(drm->device)->card_type < NV_E0)
390 mthd = 0x0490 + (nv_crtc->index * 0x0300); 398 mthd = 0x0490 + (nv_crtc->index * 0x0300);
391 else 399 else
392 mthd = 0x04a0 + (nv_crtc->index * 0x0300); 400 mthd = 0x04a0 + (nv_crtc->index * 0x0300);
@@ -701,11 +709,12 @@ static int
701nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 709nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
702 struct drm_framebuffer *old_fb) 710 struct drm_framebuffer *old_fb)
703{ 711{
712 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
704 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 713 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
705 int ret; 714 int ret;
706 715
707 if (!crtc->fb) { 716 if (!crtc->fb) {
708 NV_DEBUG_KMS(crtc->dev, "No FB bound\n"); 717 NV_DEBUG(drm, "No FB bound\n");
709 return 0; 718 return 0;
710 } 719 }
711 720
@@ -923,6 +932,7 @@ nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
923{ 932{
924 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 933 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
925 struct drm_device *dev = encoder->dev; 934 struct drm_device *dev = encoder->dev;
935 struct nouveau_device *device = nouveau_dev(dev);
926 int or = nv_encoder->or; 936 int or = nv_encoder->or;
927 u32 dpms_ctrl; 937 u32 dpms_ctrl;
928 938
@@ -932,9 +942,9 @@ nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
932 if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF) 942 if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
933 dpms_ctrl |= 0x00000004; 943 dpms_ctrl |= 0x00000004;
934 944
935 nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000); 945 nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
936 nv_mask(dev, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl); 946 nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
937 nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000); 947 nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
938} 948}
939 949
940static bool 950static bool
@@ -1025,18 +1035,19 @@ nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1025 enum drm_connector_status status = connector_status_disconnected; 1035 enum drm_connector_status status = connector_status_disconnected;
1026 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1036 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1027 struct drm_device *dev = encoder->dev; 1037 struct drm_device *dev = encoder->dev;
1038 struct nouveau_device *device = nouveau_dev(dev);
1028 int or = nv_encoder->or; 1039 int or = nv_encoder->or;
1029 u32 load; 1040 u32 load;
1030 1041
1031 nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00100000); 1042 nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
1032 udelay(9500); 1043 udelay(9500);
1033 nv_wr32(dev, 0x61a00c + (or * 0x800), 0x80000000); 1044 nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
1034 1045
1035 load = nv_rd32(dev, 0x61a00c + (or * 0x800)); 1046 load = nv_rd32(device, 0x61a00c + (or * 0x800));
1036 if ((load & 0x38000000) == 0x38000000) 1047 if ((load & 0x38000000) == 0x38000000)
1037 status = connector_status_connected; 1048 status = connector_status_connected;
1038 1049
1039 nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00000000); 1050 nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
1040 return status; 1051 return status;
1041} 1052}
1042 1053
@@ -1063,7 +1074,7 @@ static const struct drm_encoder_funcs nvd0_dac_func = {
1063}; 1074};
1064 1075
1065static int 1076static int
1066nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe) 1077nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
1067{ 1078{
1068 struct drm_device *dev = connector->dev; 1079 struct drm_device *dev = connector->dev;
1069 struct nouveau_encoder *nv_encoder; 1080 struct nouveau_encoder *nv_encoder;
@@ -1094,24 +1105,25 @@ nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1094 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1105 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1095 struct nouveau_connector *nv_connector; 1106 struct nouveau_connector *nv_connector;
1096 struct drm_device *dev = encoder->dev; 1107 struct drm_device *dev = encoder->dev;
1108 struct nouveau_device *device = nouveau_dev(dev);
1097 int i, or = nv_encoder->or * 0x30; 1109 int i, or = nv_encoder->or * 0x30;
1098 1110
1099 nv_connector = nouveau_encoder_connector_get(nv_encoder); 1111 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1100 if (!drm_detect_monitor_audio(nv_connector->edid)) 1112 if (!drm_detect_monitor_audio(nv_connector->edid))
1101 return; 1113 return;
1102 1114
1103 nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001); 1115 nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
1104 1116
1105 drm_edid_to_eld(&nv_connector->base, nv_connector->edid); 1117 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
1106 if (nv_connector->base.eld[0]) { 1118 if (nv_connector->base.eld[0]) {
1107 u8 *eld = nv_connector->base.eld; 1119 u8 *eld = nv_connector->base.eld;
1108 1120
1109 for (i = 0; i < eld[2] * 4; i++) 1121 for (i = 0; i < eld[2] * 4; i++)
1110 nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]); 1122 nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
1111 for (i = eld[2] * 4; i < 0x60; i++) 1123 for (i = eld[2] * 4; i < 0x60; i++)
1112 nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00); 1124 nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
1113 1125
1114 nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002); 1126 nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
1115 } 1127 }
1116} 1128}
1117 1129
@@ -1120,9 +1132,10 @@ nvd0_audio_disconnect(struct drm_encoder *encoder)
1120{ 1132{
1121 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1133 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1122 struct drm_device *dev = encoder->dev; 1134 struct drm_device *dev = encoder->dev;
1135 struct nouveau_device *device = nouveau_dev(dev);
1123 int or = nv_encoder->or * 0x30; 1136 int or = nv_encoder->or * 0x30;
1124 1137
1125 nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000); 1138 nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
1126} 1139}
1127 1140
1128/****************************************************************************** 1141/******************************************************************************
@@ -1135,6 +1148,7 @@ nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1135 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 1148 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1136 struct nouveau_connector *nv_connector; 1149 struct nouveau_connector *nv_connector;
1137 struct drm_device *dev = encoder->dev; 1150 struct drm_device *dev = encoder->dev;
1151 struct nouveau_device *device = nouveau_dev(dev);
1138 int head = nv_crtc->index * 0x800; 1152 int head = nv_crtc->index * 0x800;
1139 u32 rekey = 56; /* binary driver, and tegra constant */ 1153 u32 rekey = 56; /* binary driver, and tegra constant */
1140 u32 max_ac_packet; 1154 u32 max_ac_packet;
@@ -1149,25 +1163,25 @@ nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1149 max_ac_packet /= 32; 1163 max_ac_packet /= 32;
1150 1164
1151 /* AVI InfoFrame */ 1165 /* AVI InfoFrame */
1152 nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000); 1166 nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
1153 nv_wr32(dev, 0x61671c + head, 0x000d0282); 1167 nv_wr32(device, 0x61671c + head, 0x000d0282);
1154 nv_wr32(dev, 0x616720 + head, 0x0000006f); 1168 nv_wr32(device, 0x616720 + head, 0x0000006f);
1155 nv_wr32(dev, 0x616724 + head, 0x00000000); 1169 nv_wr32(device, 0x616724 + head, 0x00000000);
1156 nv_wr32(dev, 0x616728 + head, 0x00000000); 1170 nv_wr32(device, 0x616728 + head, 0x00000000);
1157 nv_wr32(dev, 0x61672c + head, 0x00000000); 1171 nv_wr32(device, 0x61672c + head, 0x00000000);
1158 nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001); 1172 nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
1159 1173
1160 /* ??? InfoFrame? */ 1174 /* ??? InfoFrame? */
1161 nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000); 1175 nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
1162 nv_wr32(dev, 0x6167ac + head, 0x00000010); 1176 nv_wr32(device, 0x6167ac + head, 0x00000010);
1163 nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001); 1177 nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
1164 1178
1165 /* HDMI_CTRL */ 1179 /* HDMI_CTRL */
1166 nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey | 1180 nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
1167 max_ac_packet << 16); 1181 max_ac_packet << 16);
1168 1182
1169 /* NFI, audio doesn't work without it though.. */ 1183 /* NFI, audio doesn't work without it though.. */
1170 nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000); 1184 nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
1171 1185
1172 nvd0_audio_mode_set(encoder, mode); 1186 nvd0_audio_mode_set(encoder, mode);
1173} 1187}
@@ -1178,37 +1192,41 @@ nvd0_hdmi_disconnect(struct drm_encoder *encoder)
1178 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1192 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1179 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc); 1193 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
1180 struct drm_device *dev = encoder->dev; 1194 struct drm_device *dev = encoder->dev;
1195 struct nouveau_device *device = nouveau_dev(dev);
1181 int head = nv_crtc->index * 0x800; 1196 int head = nv_crtc->index * 0x800;
1182 1197
1183 nvd0_audio_disconnect(encoder); 1198 nvd0_audio_disconnect(encoder);
1184 1199
1185 nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000); 1200 nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
1186 nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000); 1201 nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
1187 nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000); 1202 nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
1188} 1203}
1189 1204
1190/****************************************************************************** 1205/******************************************************************************
1191 * SOR 1206 * SOR
1192 *****************************************************************************/ 1207 *****************************************************************************/
1193static inline u32 1208static inline u32
1194nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane) 1209nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
1195{ 1210{
1196 static const u8 nvd0[] = { 16, 8, 0, 24 }; 1211 static const u8 nvd0[] = { 16, 8, 0, 24 };
1197 return nvd0[lane]; 1212 return nvd0[lane];
1198} 1213}
1199 1214
1200static void 1215static void
1201nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern) 1216nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
1202{ 1217{
1218 struct nouveau_device *device = nouveau_dev(dev);
1203 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); 1219 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
1204 const u32 loff = (or * 0x800) + (link * 0x80); 1220 const u32 loff = (or * 0x800) + (link * 0x80);
1205 nv_mask(dev, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); 1221 nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
1206} 1222}
1207 1223
1208static void 1224static void
1209nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb, 1225nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
1210 u8 lane, u8 swing, u8 preem) 1226 u8 lane, u8 swing, u8 preem)
1211{ 1227{
1228 struct nouveau_device *device = nouveau_dev(dev);
1229 struct nouveau_drm *drm = nouveau_drm(dev);
1212 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); 1230 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
1213 const u32 loff = (or * 0x800) + (link * 0x80); 1231 const u32 loff = (or * 0x800) + (link * 0x80);
1214 u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane); 1232 u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
@@ -1236,25 +1254,26 @@ nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
1236 } 1254 }
1237 1255
1238 if (!config) { 1256 if (!config) {
1239 NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n"); 1257 NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
1240 return; 1258 return;
1241 } 1259 }
1242 1260
1243 nv_mask(dev, 0x61c118 + loff, mask, config[1] << shift); 1261 nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
1244 nv_mask(dev, 0x61c120 + loff, mask, config[2] << shift); 1262 nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
1245 nv_mask(dev, 0x61c130 + loff, 0x0000ff00, config[3] << 8); 1263 nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
1246 nv_mask(dev, 0x61c13c + loff, 0x00000000, 0x00000000); 1264 nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
1247} 1265}
1248 1266
1249static void 1267static void
1250nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc, 1268nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
1251 int link_nr, u32 link_bw, bool enhframe) 1269 int link_nr, u32 link_bw, bool enhframe)
1252{ 1270{
1271 struct nouveau_device *device = nouveau_dev(dev);
1253 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); 1272 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
1254 const u32 loff = (or * 0x800) + (link * 0x80); 1273 const u32 loff = (or * 0x800) + (link * 0x80);
1255 const u32 soff = (or * 0x800); 1274 const u32 soff = (or * 0x800);
1256 u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & ~0x001f4000; 1275 u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
1257 u32 clksor = nv_rd32(dev, 0x612300 + soff) & ~0x007c0000; 1276 u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
1258 u32 script = 0x0000, lane_mask = 0; 1277 u32 script = 0x0000, lane_mask = 0;
1259 u8 *table, *entry; 1278 u8 *table, *entry;
1260 int i; 1279 int i;
@@ -1284,20 +1303,21 @@ nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
1284 for (i = 0; i < link_nr; i++) 1303 for (i = 0; i < link_nr; i++)
1285 lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3); 1304 lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
1286 1305
1287 nv_wr32(dev, 0x612300 + soff, clksor); 1306 nv_wr32(device, 0x612300 + soff, clksor);
1288 nv_wr32(dev, 0x61c10c + loff, dpctrl); 1307 nv_wr32(device, 0x61c10c + loff, dpctrl);
1289 nv_mask(dev, 0x61c130 + loff, 0x0000000f, lane_mask); 1308 nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
1290} 1309}
1291 1310
1292static void 1311static void
1293nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb, 1312nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
1294 u32 *link_nr, u32 *link_bw) 1313 u32 *link_nr, u32 *link_bw)
1295{ 1314{
1315 struct nouveau_device *device = nouveau_dev(dev);
1296 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1); 1316 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
1297 const u32 loff = (or * 0x800) + (link * 0x80); 1317 const u32 loff = (or * 0x800) + (link * 0x80);
1298 const u32 soff = (or * 0x800); 1318 const u32 soff = (or * 0x800);
1299 u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & 0x000f0000; 1319 u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
1300 u32 clksor = nv_rd32(dev, 0x612300 + soff); 1320 u32 clksor = nv_rd32(device, 0x612300 + soff);
1301 1321
1302 if (dpctrl > 0x00030000) *link_nr = 4; 1322 if (dpctrl > 0x00030000) *link_nr = 4;
1303 else if (dpctrl > 0x00010000) *link_nr = 2; 1323 else if (dpctrl > 0x00010000) *link_nr = 2;
@@ -1308,9 +1328,10 @@ nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb,
1308} 1328}
1309 1329
1310static void 1330static void
1311nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb, 1331nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
1312 u32 crtc, u32 datarate) 1332 u32 crtc, u32 datarate)
1313{ 1333{
1334 struct nouveau_device *device = nouveau_dev(dev);
1314 const u32 symbol = 100000; 1335 const u32 symbol = 100000;
1315 const u32 TU = 64; 1336 const u32 TU = 64;
1316 u32 link_nr, link_bw; 1337 u32 link_nr, link_bw;
@@ -1330,7 +1351,7 @@ nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb,
1330 value += 5; 1351 value += 5;
1331 value |= 0x08000000; 1352 value |= 0x08000000;
1332 1353
1333 nv_wr32(dev, 0x616610 + (crtc * 0x800), value); 1354 nv_wr32(device, 0x616610 + (crtc * 0x800), value);
1334} 1355}
1335 1356
1336static void 1357static void
@@ -1338,6 +1359,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
1338{ 1359{
1339 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1360 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1340 struct drm_device *dev = encoder->dev; 1361 struct drm_device *dev = encoder->dev;
1362 struct nouveau_device *device = nouveau_dev(dev);
1341 struct drm_encoder *partner; 1363 struct drm_encoder *partner;
1342 int or = nv_encoder->or; 1364 int or = nv_encoder->or;
1343 u32 dpms_ctrl; 1365 u32 dpms_ctrl;
@@ -1361,12 +1383,12 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
1361 dpms_ctrl = (mode == DRM_MODE_DPMS_ON); 1383 dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
1362 dpms_ctrl |= 0x80000000; 1384 dpms_ctrl |= 0x80000000;
1363 1385
1364 nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000); 1386 nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
1365 nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl); 1387 nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
1366 nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000); 1388 nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
1367 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000); 1389 nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
1368 1390
1369 if (nv_encoder->dcb->type == OUTPUT_DP) { 1391 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
1370 struct dp_train_func func = { 1392 struct dp_train_func func = {
1371 .link_set = nvd0_sor_dp_link_set, 1393 .link_set = nvd0_sor_dp_link_set,
1372 .train_set = nvd0_sor_dp_train_set, 1394 .train_set = nvd0_sor_dp_train_set,
@@ -1427,7 +1449,7 @@ static void
1427nvd0_sor_prepare(struct drm_encoder *encoder) 1449nvd0_sor_prepare(struct drm_encoder *encoder)
1428{ 1450{
1429 nvd0_sor_disconnect(encoder); 1451 nvd0_sor_disconnect(encoder);
1430 if (nouveau_encoder(encoder)->dcb->type == OUTPUT_DP) 1452 if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
1431 evo_sync(encoder->dev, EVO_MASTER); 1453 evo_sync(encoder->dev, EVO_MASTER);
1432} 1454}
1433 1455
@@ -1441,11 +1463,11 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1441 struct drm_display_mode *mode) 1463 struct drm_display_mode *mode)
1442{ 1464{
1443 struct drm_device *dev = encoder->dev; 1465 struct drm_device *dev = encoder->dev;
1444 struct drm_nouveau_private *dev_priv = dev->dev_private; 1466 struct nouveau_drm *drm = nouveau_drm(dev);
1445 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1467 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1446 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 1468 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1447 struct nouveau_connector *nv_connector; 1469 struct nouveau_connector *nv_connector;
1448 struct nvbios *bios = &dev_priv->vbios; 1470 struct nvbios *bios = &drm->vbios;
1449 u32 mode_ctrl = (1 << nv_crtc->index); 1471 u32 mode_ctrl = (1 << nv_crtc->index);
1450 u32 syncs, magic, *push; 1472 u32 syncs, magic, *push;
1451 u32 or_config; 1473 u32 or_config;
@@ -1462,7 +1484,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1462 1484
1463 nv_connector = nouveau_encoder_connector_get(nv_encoder); 1485 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1464 switch (nv_encoder->dcb->type) { 1486 switch (nv_encoder->dcb->type) {
1465 case OUTPUT_TMDS: 1487 case DCB_OUTPUT_TMDS:
1466 if (nv_encoder->dcb->sorconf.link & 1) { 1488 if (nv_encoder->dcb->sorconf.link & 1) {
1467 if (mode->clock < 165000) 1489 if (mode->clock < 165000)
1468 mode_ctrl |= 0x00000100; 1490 mode_ctrl |= 0x00000100;
@@ -1478,7 +1500,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1478 1500
1479 nvd0_hdmi_mode_set(encoder, mode); 1501 nvd0_hdmi_mode_set(encoder, mode);
1480 break; 1502 break;
1481 case OUTPUT_LVDS: 1503 case DCB_OUTPUT_LVDS:
1482 or_config = (mode_ctrl & 0x00000f00) >> 8; 1504 or_config = (mode_ctrl & 0x00000f00) >> 8;
1483 if (bios->fp_no_ddc) { 1505 if (bios->fp_no_ddc) {
1484 if (bios->fp.dual_link) 1506 if (bios->fp.dual_link)
@@ -1507,7 +1529,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1507 1529
1508 } 1530 }
1509 break; 1531 break;
1510 case OUTPUT_DP: 1532 case DCB_OUTPUT_DP:
1511 if (nv_connector->base.display_info.bpc == 6) { 1533 if (nv_connector->base.display_info.bpc == 6) {
1512 nv_encoder->dp.datarate = mode->clock * 18 / 8; 1534 nv_encoder->dp.datarate = mode->clock * 18 / 8;
1513 syncs |= 0x00000002 << 6; 1535 syncs |= 0x00000002 << 6;
@@ -1530,7 +1552,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1530 1552
1531 nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON); 1553 nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
1532 1554
1533 if (nv_encoder->dcb->type == OUTPUT_DP) { 1555 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
1534 nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index, 1556 nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
1535 nv_encoder->dp.datarate); 1557 nv_encoder->dp.datarate);
1536 } 1558 }
@@ -1571,7 +1593,7 @@ static const struct drm_encoder_funcs nvd0_sor_func = {
1571}; 1593};
1572 1594
1573static int 1595static int
1574nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe) 1596nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
1575{ 1597{
1576 struct drm_device *dev = connector->dev; 1598 struct drm_device *dev = connector->dev;
1577 struct nouveau_encoder *nv_encoder; 1599 struct nouveau_encoder *nv_encoder;
@@ -1597,50 +1619,51 @@ nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
1597/****************************************************************************** 1619/******************************************************************************
1598 * IRQ 1620 * IRQ
1599 *****************************************************************************/ 1621 *****************************************************************************/
1600static struct dcb_entry * 1622static struct dcb_output *
1601lookup_dcb(struct drm_device *dev, int id, u32 mc) 1623lookup_dcb(struct drm_device *dev, int id, u32 mc)
1602{ 1624{
1603 struct drm_nouveau_private *dev_priv = dev->dev_private; 1625 struct nouveau_drm *drm = nouveau_drm(dev);
1604 int type, or, i, link = -1; 1626 int type, or, i, link = -1;
1605 1627
1606 if (id < 4) { 1628 if (id < 4) {
1607 type = OUTPUT_ANALOG; 1629 type = DCB_OUTPUT_ANALOG;
1608 or = id; 1630 or = id;
1609 } else { 1631 } else {
1610 switch (mc & 0x00000f00) { 1632 switch (mc & 0x00000f00) {
1611 case 0x00000000: link = 0; type = OUTPUT_LVDS; break; 1633 case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
1612 case 0x00000100: link = 0; type = OUTPUT_TMDS; break; 1634 case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
1613 case 0x00000200: link = 1; type = OUTPUT_TMDS; break; 1635 case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
1614 case 0x00000500: link = 0; type = OUTPUT_TMDS; break; 1636 case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
1615 case 0x00000800: link = 0; type = OUTPUT_DP; break; 1637 case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
1616 case 0x00000900: link = 1; type = OUTPUT_DP; break; 1638 case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
1617 default: 1639 default:
1618 NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc); 1640 NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
1619 return NULL; 1641 return NULL;
1620 } 1642 }
1621 1643
1622 or = id - 4; 1644 or = id - 4;
1623 } 1645 }
1624 1646
1625 for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { 1647 for (i = 0; i < drm->vbios.dcb.entries; i++) {
1626 struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; 1648 struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
1627 if (dcb->type == type && (dcb->or & (1 << or)) && 1649 if (dcb->type == type && (dcb->or & (1 << or)) &&
1628 (link < 0 || link == !(dcb->sorconf.link & 1))) 1650 (link < 0 || link == !(dcb->sorconf.link & 1)))
1629 return dcb; 1651 return dcb;
1630 } 1652 }
1631 1653
1632 NV_ERROR(dev, "PDISP: DCB for %d/0x%08x not found\n", id, mc); 1654 NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
1633 return NULL; 1655 return NULL;
1634} 1656}
1635 1657
1636static void 1658static void
1637nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask) 1659nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
1638{ 1660{
1639 struct dcb_entry *dcb; 1661 struct nouveau_device *device = nouveau_dev(dev);
1662 struct dcb_output *dcb;
1640 int i; 1663 int i;
1641 1664
1642 for (i = 0; mask && i < 8; i++) { 1665 for (i = 0; mask && i < 8; i++) {
1643 u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20)); 1666 u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
1644 if (!(mcc & (1 << crtc))) 1667 if (!(mcc & (1 << crtc)))
1645 continue; 1668 continue;
1646 1669
@@ -1651,20 +1674,22 @@ nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
1651 nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc); 1674 nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
1652 } 1675 }
1653 1676
1654 nv_wr32(dev, 0x6101d4, 0x00000000); 1677 nv_wr32(device, 0x6101d4, 0x00000000);
1655 nv_wr32(dev, 0x6109d4, 0x00000000); 1678 nv_wr32(device, 0x6109d4, 0x00000000);
1656 nv_wr32(dev, 0x6101d0, 0x80000000); 1679 nv_wr32(device, 0x6101d0, 0x80000000);
1657} 1680}
1658 1681
1659static void 1682static void
1660nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask) 1683nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
1661{ 1684{
1662 struct dcb_entry *dcb; 1685 struct nouveau_device *device = nouveau_dev(dev);
1686 struct nouveau_drm *drm = nouveau_drm(dev);
1687 struct dcb_output *dcb;
1663 u32 or, tmp, pclk; 1688 u32 or, tmp, pclk;
1664 int i; 1689 int i;
1665 1690
1666 for (i = 0; mask && i < 8; i++) { 1691 for (i = 0; mask && i < 8; i++) {
1667 u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20)); 1692 u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
1668 if (!(mcc & (1 << crtc))) 1693 if (!(mcc & (1 << crtc)))
1669 continue; 1694 continue;
1670 1695
@@ -1675,16 +1700,16 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
1675 nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc); 1700 nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
1676 } 1701 }
1677 1702
1678 pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000; 1703 pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
1679 NV_DEBUG_KMS(dev, "PDISP: crtc %d pclk %d mask 0x%08x\n", 1704 NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
1680 crtc, pclk, mask); 1705 crtc, pclk, mask);
1681 if (pclk && (mask & 0x00010000)) { 1706 if (pclk && (mask & 0x00010000)) {
1682 nv50_crtc_set_clock(dev, crtc, pclk); 1707 nv50_crtc_set_clock(dev, crtc, pclk);
1683 } 1708 }
1684 1709
1685 for (i = 0; mask && i < 8; i++) { 1710 for (i = 0; mask && i < 8; i++) {
1686 u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20)); 1711 u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
1687 u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20)); 1712 u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
1688 if (!(mcp & (1 << crtc))) 1713 if (!(mcp & (1 << crtc)))
1689 continue; 1714 continue;
1690 1715
@@ -1695,20 +1720,20 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
1695 1720
1696 nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc); 1721 nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
1697 1722
1698 nv_wr32(dev, 0x612200 + (crtc * 0x800), 0x00000000); 1723 nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
1699 switch (dcb->type) { 1724 switch (dcb->type) {
1700 case OUTPUT_ANALOG: 1725 case DCB_OUTPUT_ANALOG:
1701 nv_wr32(dev, 0x612280 + (or * 0x800), 0x00000000); 1726 nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
1702 break; 1727 break;
1703 case OUTPUT_TMDS: 1728 case DCB_OUTPUT_TMDS:
1704 case OUTPUT_LVDS: 1729 case DCB_OUTPUT_LVDS:
1705 case OUTPUT_DP: 1730 case DCB_OUTPUT_DP:
1706 if (cfg & 0x00000100) 1731 if (cfg & 0x00000100)
1707 tmp = 0x00000101; 1732 tmp = 0x00000101;
1708 else 1733 else
1709 tmp = 0x00000000; 1734 tmp = 0x00000000;
1710 1735
1711 nv_mask(dev, 0x612300 + (or * 0x800), 0x00000707, tmp); 1736 nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
1712 break; 1737 break;
1713 default: 1738 default:
1714 break; 1739 break;
@@ -1717,22 +1742,23 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
1717 break; 1742 break;
1718 } 1743 }
1719 1744
1720 nv_wr32(dev, 0x6101d4, 0x00000000); 1745 nv_wr32(device, 0x6101d4, 0x00000000);
1721 nv_wr32(dev, 0x6109d4, 0x00000000); 1746 nv_wr32(device, 0x6109d4, 0x00000000);
1722 nv_wr32(dev, 0x6101d0, 0x80000000); 1747 nv_wr32(device, 0x6101d0, 0x80000000);
1723} 1748}
1724 1749
1725static void 1750static void
1726nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask) 1751nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
1727{ 1752{
1728 struct dcb_entry *dcb; 1753 struct nouveau_device *device = nouveau_dev(dev);
1754 struct dcb_output *dcb;
1729 int pclk, i; 1755 int pclk, i;
1730 1756
1731 pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000; 1757 pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
1732 1758
1733 for (i = 0; mask && i < 8; i++) { 1759 for (i = 0; mask && i < 8; i++) {
1734 u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20)); 1760 u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
1735 u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20)); 1761 u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
1736 if (!(mcp & (1 << crtc))) 1762 if (!(mcp & (1 << crtc)))
1737 continue; 1763 continue;
1738 1764
@@ -1743,34 +1769,36 @@ nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
1743 nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc); 1769 nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
1744 } 1770 }
1745 1771
1746 nv_wr32(dev, 0x6101d4, 0x00000000); 1772 nv_wr32(device, 0x6101d4, 0x00000000);
1747 nv_wr32(dev, 0x6109d4, 0x00000000); 1773 nv_wr32(device, 0x6109d4, 0x00000000);
1748 nv_wr32(dev, 0x6101d0, 0x80000000); 1774 nv_wr32(device, 0x6101d0, 0x80000000);
1749} 1775}
1750 1776
1751static void 1777static void
1752nvd0_display_bh(unsigned long data) 1778nvd0_display_bh(unsigned long data)
1753{ 1779{
1754 struct drm_device *dev = (struct drm_device *)data; 1780 struct drm_device *dev = (struct drm_device *)data;
1781 struct nouveau_device *device = nouveau_dev(dev);
1782 struct nouveau_drm *drm = nouveau_drm(dev);
1755 struct nvd0_display *disp = nvd0_display(dev); 1783 struct nvd0_display *disp = nvd0_display(dev);
1756 u32 mask = 0, crtc = ~0; 1784 u32 mask = 0, crtc = ~0;
1757 int i; 1785 int i;
1758 1786
1759 if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) { 1787 if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
1760 NV_INFO(dev, "PDISP: modeset req %d\n", disp->modeset); 1788 NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
1761 NV_INFO(dev, " STAT: 0x%08x 0x%08x 0x%08x\n", 1789 NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
1762 nv_rd32(dev, 0x6101d0), 1790 nv_rd32(device, 0x6101d0),
1763 nv_rd32(dev, 0x6101d4), nv_rd32(dev, 0x6109d4)); 1791 nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
1764 for (i = 0; i < 8; i++) { 1792 for (i = 0; i < 8; i++) {
1765 NV_INFO(dev, " %s%d: 0x%08x 0x%08x\n", 1793 NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
1766 i < 4 ? "DAC" : "SOR", i, 1794 i < 4 ? "DAC" : "SOR", i,
1767 nv_rd32(dev, 0x640180 + (i * 0x20)), 1795 nv_rd32(device, 0x640180 + (i * 0x20)),
1768 nv_rd32(dev, 0x660180 + (i * 0x20))); 1796 nv_rd32(device, 0x660180 + (i * 0x20)));
1769 } 1797 }
1770 } 1798 }
1771 1799
1772 while (!mask && ++crtc < dev->mode_config.num_crtc) 1800 while (!mask && ++crtc < dev->mode_config.num_crtc)
1773 mask = nv_rd32(dev, 0x6101d4 + (crtc * 0x800)); 1801 mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
1774 1802
1775 if (disp->modeset & 0x00000001) 1803 if (disp->modeset & 0x00000001)
1776 nvd0_display_unk1_handler(dev, crtc, mask); 1804 nvd0_display_unk1_handler(dev, crtc, mask);
@@ -1780,67 +1808,60 @@ nvd0_display_bh(unsigned long data)
1780 nvd0_display_unk4_handler(dev, crtc, mask); 1808 nvd0_display_unk4_handler(dev, crtc, mask);
1781} 1809}
1782 1810
1783static void 1811void
1784nvd0_display_intr(struct drm_device *dev) 1812nvd0_display_intr(struct drm_device *dev)
1785{ 1813{
1786 struct nvd0_display *disp = nvd0_display(dev); 1814 struct nvd0_display *disp = nvd0_display(dev);
1787 u32 intr = nv_rd32(dev, 0x610088); 1815 struct nouveau_device *device = nouveau_dev(dev);
1788 int i; 1816 struct nouveau_drm *drm = nouveau_drm(dev);
1817 u32 intr = nv_rd32(device, 0x610088);
1789 1818
1790 if (intr & 0x00000001) { 1819 if (intr & 0x00000001) {
1791 u32 stat = nv_rd32(dev, 0x61008c); 1820 u32 stat = nv_rd32(device, 0x61008c);
1792 nv_wr32(dev, 0x61008c, stat); 1821 nv_wr32(device, 0x61008c, stat);
1793 intr &= ~0x00000001; 1822 intr &= ~0x00000001;
1794 } 1823 }
1795 1824
1796 if (intr & 0x00000002) { 1825 if (intr & 0x00000002) {
1797 u32 stat = nv_rd32(dev, 0x61009c); 1826 u32 stat = nv_rd32(device, 0x61009c);
1798 int chid = ffs(stat) - 1; 1827 int chid = ffs(stat) - 1;
1799 if (chid >= 0) { 1828 if (chid >= 0) {
1800 u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12)); 1829 u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
1801 u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12)); 1830 u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
1802 u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12)); 1831 u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
1803 1832
1804 NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x " 1833 NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
1805 "0x%08x 0x%08x\n", 1834 "0x%08x 0x%08x\n",
1806 chid, (mthd & 0x0000ffc), data, mthd, unkn); 1835 chid, (mthd & 0x0000ffc), data, mthd, unkn);
1807 nv_wr32(dev, 0x61009c, (1 << chid)); 1836 nv_wr32(device, 0x61009c, (1 << chid));
1808 nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000); 1837 nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
1809 } 1838 }
1810 1839
1811 intr &= ~0x00000002; 1840 intr &= ~0x00000002;
1812 } 1841 }
1813 1842
1814 if (intr & 0x00100000) { 1843 if (intr & 0x00100000) {
1815 u32 stat = nv_rd32(dev, 0x6100ac); 1844 u32 stat = nv_rd32(device, 0x6100ac);
1816 1845
1817 if (stat & 0x00000007) { 1846 if (stat & 0x00000007) {
1818 disp->modeset = stat; 1847 disp->modeset = stat;
1819 tasklet_schedule(&disp->tasklet); 1848 tasklet_schedule(&disp->tasklet);
1820 1849
1821 nv_wr32(dev, 0x6100ac, (stat & 0x00000007)); 1850 nv_wr32(device, 0x6100ac, (stat & 0x00000007));
1822 stat &= ~0x00000007; 1851 stat &= ~0x00000007;
1823 } 1852 }
1824 1853
1825 if (stat) { 1854 if (stat) {
1826 NV_INFO(dev, "PDISP: unknown intr24 0x%08x\n", stat); 1855 NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
1827 nv_wr32(dev, 0x6100ac, stat); 1856 nv_wr32(device, 0x6100ac, stat);
1828 } 1857 }
1829 1858
1830 intr &= ~0x00100000; 1859 intr &= ~0x00100000;
1831 } 1860 }
1832 1861
1833 for (i = 0; i < dev->mode_config.num_crtc; i++) { 1862 intr &= ~0x0f000000; /* vblank, handled in core */
1834 u32 mask = 0x01000000 << i;
1835 if (intr & mask) {
1836 u32 stat = nv_rd32(dev, 0x6100bc + (i * 0x800));
1837 nv_wr32(dev, 0x6100bc + (i * 0x800), stat);
1838 intr &= ~mask;
1839 }
1840 }
1841
1842 if (intr) 1863 if (intr)
1843 NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr); 1864 NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
1844} 1865}
1845 1866
1846/****************************************************************************** 1867/******************************************************************************
@@ -1867,15 +1888,17 @@ int
1867nvd0_display_init(struct drm_device *dev) 1888nvd0_display_init(struct drm_device *dev)
1868{ 1889{
1869 struct nvd0_display *disp = nvd0_display(dev); 1890 struct nvd0_display *disp = nvd0_display(dev);
1891 struct nouveau_device *device = nouveau_dev(dev);
1892 struct nouveau_drm *drm = nouveau_drm(dev);
1870 int ret, i; 1893 int ret, i;
1871 u32 *push; 1894 u32 *push;
1872 1895
1873 if (nv_rd32(dev, 0x6100ac) & 0x00000100) { 1896 if (nv_rd32(device, 0x6100ac) & 0x00000100) {
1874 nv_wr32(dev, 0x6100ac, 0x00000100); 1897 nv_wr32(device, 0x6100ac, 0x00000100);
1875 nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000); 1898 nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
1876 if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) { 1899 if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
1877 NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n", 1900 NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
1878 nv_rd32(dev, 0x6194e8)); 1901 nv_rd32(device, 0x6194e8));
1879 return -EBUSY; 1902 return -EBUSY;
1880 } 1903 }
1881 } 1904 }
@@ -1884,27 +1907,27 @@ nvd0_display_init(struct drm_device *dev)
1884 * work at all unless you do the SOR part below. 1907 * work at all unless you do the SOR part below.
1885 */ 1908 */
1886 for (i = 0; i < 3; i++) { 1909 for (i = 0; i < 3; i++) {
1887 u32 dac = nv_rd32(dev, 0x61a000 + (i * 0x800)); 1910 u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
1888 nv_wr32(dev, 0x6101c0 + (i * 0x800), dac); 1911 nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
1889 } 1912 }
1890 1913
1891 for (i = 0; i < 4; i++) { 1914 for (i = 0; i < 4; i++) {
1892 u32 sor = nv_rd32(dev, 0x61c000 + (i * 0x800)); 1915 u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
1893 nv_wr32(dev, 0x6301c4 + (i * 0x800), sor); 1916 nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
1894 } 1917 }
1895 1918
1896 for (i = 0; i < dev->mode_config.num_crtc; i++) { 1919 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1897 u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800)); 1920 u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
1898 u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800)); 1921 u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
1899 u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800)); 1922 u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
1900 nv_wr32(dev, 0x6101b4 + (i * 0x800), crtc0); 1923 nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
1901 nv_wr32(dev, 0x6101b8 + (i * 0x800), crtc1); 1924 nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
1902 nv_wr32(dev, 0x6101bc + (i * 0x800), crtc2); 1925 nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
1903 } 1926 }
1904 1927
1905 /* point at our hash table / objects, enable interrupts */ 1928 /* point at our hash table / objects, enable interrupts */
1906 nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9); 1929 nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
1907 nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307); 1930 nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
1908 1931
1909 /* init master */ 1932 /* init master */
1910 ret = evo_init_dma(dev, EVO_MASTER); 1933 ret = evo_init_dma(dev, EVO_MASTER);
@@ -1944,7 +1967,6 @@ error:
1944void 1967void
1945nvd0_display_destroy(struct drm_device *dev) 1968nvd0_display_destroy(struct drm_device *dev)
1946{ 1969{
1947 struct drm_nouveau_private *dev_priv = dev->dev_private;
1948 struct nvd0_display *disp = nvd0_display(dev); 1970 struct nvd0_display *disp = nvd0_display(dev);
1949 struct pci_dev *pdev = dev->pdev; 1971 struct pci_dev *pdev = dev->pdev;
1950 int i; 1972 int i;
@@ -1957,31 +1979,36 @@ nvd0_display_destroy(struct drm_device *dev)
1957 nouveau_gpuobj_ref(NULL, &disp->mem); 1979 nouveau_gpuobj_ref(NULL, &disp->mem);
1958 nouveau_bo_unmap(disp->sync); 1980 nouveau_bo_unmap(disp->sync);
1959 nouveau_bo_ref(NULL, &disp->sync); 1981 nouveau_bo_ref(NULL, &disp->sync);
1960 nouveau_irq_unregister(dev, 26);
1961 1982
1962 dev_priv->engine.display.priv = NULL; 1983 nouveau_display(dev)->priv = NULL;
1963 kfree(disp); 1984 kfree(disp);
1964} 1985}
1965 1986
1966int 1987int
1967nvd0_display_create(struct drm_device *dev) 1988nvd0_display_create(struct drm_device *dev)
1968{ 1989{
1969 struct drm_nouveau_private *dev_priv = dev->dev_private; 1990 struct nouveau_device *device = nouveau_dev(dev);
1970 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 1991 struct nouveau_drm *drm = nouveau_drm(dev);
1971 struct dcb_table *dcb = &dev_priv->vbios.dcb; 1992 struct nouveau_bar *bar = nouveau_bar(device);
1993 struct nouveau_fb *pfb = nouveau_fb(device);
1994 struct dcb_table *dcb = &drm->vbios.dcb;
1972 struct drm_connector *connector, *tmp; 1995 struct drm_connector *connector, *tmp;
1973 struct pci_dev *pdev = dev->pdev; 1996 struct pci_dev *pdev = dev->pdev;
1974 struct nvd0_display *disp; 1997 struct nvd0_display *disp;
1975 struct dcb_entry *dcbe; 1998 struct dcb_output *dcbe;
1976 int crtcs, ret, i; 1999 int crtcs, ret, i;
1977 2000
1978 disp = kzalloc(sizeof(*disp), GFP_KERNEL); 2001 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
1979 if (!disp) 2002 if (!disp)
1980 return -ENOMEM; 2003 return -ENOMEM;
1981 dev_priv->engine.display.priv = disp; 2004
2005 nouveau_display(dev)->priv = disp;
2006 nouveau_display(dev)->dtor = nvd0_display_destroy;
2007 nouveau_display(dev)->init = nvd0_display_init;
2008 nouveau_display(dev)->fini = nvd0_display_fini;
1982 2009
1983 /* create crtc objects to represent the hw heads */ 2010 /* create crtc objects to represent the hw heads */
1984 crtcs = nv_rd32(dev, 0x022448); 2011 crtcs = nv_rd32(device, 0x022448);
1985 for (i = 0; i < crtcs; i++) { 2012 for (i = 0; i < crtcs; i++) {
1986 ret = nvd0_crtc_create(dev, i); 2013 ret = nvd0_crtc_create(dev, i);
1987 if (ret) 2014 if (ret)
@@ -1995,22 +2022,22 @@ nvd0_display_create(struct drm_device *dev)
1995 continue; 2022 continue;
1996 2023
1997 if (dcbe->location != DCB_LOC_ON_CHIP) { 2024 if (dcbe->location != DCB_LOC_ON_CHIP) {
1998 NV_WARN(dev, "skipping off-chip encoder %d/%d\n", 2025 NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
1999 dcbe->type, ffs(dcbe->or) - 1); 2026 dcbe->type, ffs(dcbe->or) - 1);
2000 continue; 2027 continue;
2001 } 2028 }
2002 2029
2003 switch (dcbe->type) { 2030 switch (dcbe->type) {
2004 case OUTPUT_TMDS: 2031 case DCB_OUTPUT_TMDS:
2005 case OUTPUT_LVDS: 2032 case DCB_OUTPUT_LVDS:
2006 case OUTPUT_DP: 2033 case DCB_OUTPUT_DP:
2007 nvd0_sor_create(connector, dcbe); 2034 nvd0_sor_create(connector, dcbe);
2008 break; 2035 break;
2009 case OUTPUT_ANALOG: 2036 case DCB_OUTPUT_ANALOG:
2010 nvd0_dac_create(connector, dcbe); 2037 nvd0_dac_create(connector, dcbe);
2011 break; 2038 break;
2012 default: 2039 default:
2013 NV_WARN(dev, "skipping unsupported encoder %d/%d\n", 2040 NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
2014 dcbe->type, ffs(dcbe->or) - 1); 2041 dcbe->type, ffs(dcbe->or) - 1);
2015 continue; 2042 continue;
2016 } 2043 }
@@ -2021,14 +2048,13 @@ nvd0_display_create(struct drm_device *dev)
2021 if (connector->encoder_ids[0]) 2048 if (connector->encoder_ids[0])
2022 continue; 2049 continue;
2023 2050
2024 NV_WARN(dev, "%s has no encoders, removing\n", 2051 NV_WARN(drm, "%s has no encoders, removing\n",
2025 drm_get_connector_name(connector)); 2052 drm_get_connector_name(connector));
2026 connector->funcs->destroy(connector); 2053 connector->funcs->destroy(connector);
2027 } 2054 }
2028 2055
2029 /* setup interrupt handling */ 2056 /* setup interrupt handling */
2030 tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev); 2057 tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
2031 nouveau_irq_register(dev, 26, nvd0_display_intr);
2032 2058
2033 /* small shared memory area we use for notifiers and semaphores */ 2059 /* small shared memory area we use for notifiers and semaphores */
2034 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 2060 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2045,7 +2071,7 @@ nvd0_display_create(struct drm_device *dev)
2045 goto out; 2071 goto out;
2046 2072
2047 /* hash table and dma objects for the memory areas we care about */ 2073 /* hash table and dma objects for the memory areas we care about */
2048 ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000, 2074 ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
2049 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem); 2075 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
2050 if (ret) 2076 if (ret)
2051 goto out; 2077 goto out;
@@ -2077,7 +2103,7 @@ nvd0_display_create(struct drm_device *dev)
2077 2103
2078 nv_wo32(disp->mem, dmao + 0x20, 0x00000049); 2104 nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
2079 nv_wo32(disp->mem, dmao + 0x24, 0x00000000); 2105 nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
2080 nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8); 2106 nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
2081 nv_wo32(disp->mem, dmao + 0x2c, 0x00000000); 2107 nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
2082 nv_wo32(disp->mem, dmao + 0x30, 0x00000000); 2108 nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
2083 nv_wo32(disp->mem, dmao + 0x34, 0x00000000); 2109 nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
@@ -2087,7 +2113,7 @@ nvd0_display_create(struct drm_device *dev)
2087 2113
2088 nv_wo32(disp->mem, dmao + 0x40, 0x00000009); 2114 nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
2089 nv_wo32(disp->mem, dmao + 0x44, 0x00000000); 2115 nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
2090 nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8); 2116 nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
2091 nv_wo32(disp->mem, dmao + 0x4c, 0x00000000); 2117 nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
2092 nv_wo32(disp->mem, dmao + 0x50, 0x00000000); 2118 nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
2093 nv_wo32(disp->mem, dmao + 0x54, 0x00000000); 2119 nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
@@ -2097,7 +2123,7 @@ nvd0_display_create(struct drm_device *dev)
2097 2123
2098 nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009); 2124 nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
2099 nv_wo32(disp->mem, dmao + 0x64, 0x00000000); 2125 nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
2100 nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8); 2126 nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
2101 nv_wo32(disp->mem, dmao + 0x6c, 0x00000000); 2127 nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
2102 nv_wo32(disp->mem, dmao + 0x70, 0x00000000); 2128 nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
2103 nv_wo32(disp->mem, dmao + 0x74, 0x00000000); 2129 nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
@@ -2106,7 +2132,7 @@ nvd0_display_create(struct drm_device *dev)
2106 ((dmao + 0x60) << 9)); 2132 ((dmao + 0x60) << 9));
2107 } 2133 }
2108 2134
2109 pinstmem->flush(dev); 2135 bar->flush(bar);
2110 2136
2111out: 2137out:
2112 if (ret) 2138 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
deleted file mode 100644
index e98d144e6eb9..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ /dev/null
@@ -1,452 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_mm.h"
29#include "nouveau_fifo.h"
30
31#define NVE0_FIFO_ENGINE_NUM 32
32
33static void nve0_fifo_isr(struct drm_device *);
34
35struct nve0_fifo_engine {
36 struct nouveau_gpuobj *playlist[2];
37 int cur_playlist;
38};
39
40struct nve0_fifo_priv {
41 struct nouveau_fifo_priv base;
42 struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
43 struct {
44 struct nouveau_gpuobj *mem;
45 struct nouveau_vma bar;
46 } user;
47 int spoon_nr;
48};
49
50struct nve0_fifo_chan {
51 struct nouveau_fifo_chan base;
52 u32 engine;
53};
54
55static void
56nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
57{
58 struct drm_nouveau_private *dev_priv = dev->dev_private;
59 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
60 struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
61 struct nve0_fifo_engine *peng = &priv->engine[engine];
62 struct nouveau_gpuobj *cur;
63 u32 match = (engine << 16) | 0x00000001;
64 int ret, i, p;
65
66 cur = peng->playlist[peng->cur_playlist];
67 if (unlikely(cur == NULL)) {
68 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
69 if (ret) {
70 NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
71 return;
72 }
73
74 peng->playlist[peng->cur_playlist] = cur;
75 }
76
77 peng->cur_playlist = !peng->cur_playlist;
78
79 for (i = 0, p = 0; i < priv->base.channels; i++) {
80 u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
81 if (ctrl != match)
82 continue;
83 nv_wo32(cur, p + 0, i);
84 nv_wo32(cur, p + 4, 0x00000000);
85 p += 8;
86 }
87 pinstmem->flush(dev);
88
89 nv_wr32(dev, 0x002270, cur->vinst >> 12);
90 nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
91 if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
92 NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
93}
94
95static int
96nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
97{
98 struct drm_device *dev = chan->dev;
99 struct drm_nouveau_private *dev_priv = dev->dev_private;
100 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
101 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
102 struct nve0_fifo_chan *fctx;
103 u64 usermem = priv->user.mem->vinst + chan->id * 512;
104 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
105 int ret = 0, i;
106
107 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
108 if (!fctx)
109 return -ENOMEM;
110
111 fctx->engine = 0; /* PGRAPH */
112
113 /* allocate vram for control regs, map into polling area */
114 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
115 priv->user.bar.offset + (chan->id * 512), 512);
116 if (!chan->user) {
117 ret = -ENOMEM;
118 goto error;
119 }
120
121 for (i = 0; i < 0x100; i += 4)
122 nv_wo32(chan->ramin, i, 0x00000000);
123 nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
124 nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
125 nv_wo32(chan->ramin, 0x10, 0x0000face);
126 nv_wo32(chan->ramin, 0x30, 0xfffff902);
127 nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
128 nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
129 upper_32_bits(ib_virt));
130 nv_wo32(chan->ramin, 0x84, 0x20400000);
131 nv_wo32(chan->ramin, 0x94, 0x30000001);
132 nv_wo32(chan->ramin, 0x9c, 0x00000100);
133 nv_wo32(chan->ramin, 0xac, 0x0000001f);
134 nv_wo32(chan->ramin, 0xe4, 0x00000000);
135 nv_wo32(chan->ramin, 0xe8, chan->id);
136 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
137 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
138 pinstmem->flush(dev);
139
140 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
141 (chan->ramin->vinst >> 12));
142 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
143 nve0_fifo_playlist_update(dev, fctx->engine);
144 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
145
146error:
147 if (ret)
148 priv->base.base.context_del(chan, engine);
149 return ret;
150}
151
152static void
153nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
154{
155 struct nve0_fifo_chan *fctx = chan->engctx[engine];
156 struct drm_device *dev = chan->dev;
157
158 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
159 nv_wr32(dev, 0x002634, chan->id);
160 if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
161 NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
162 nve0_fifo_playlist_update(dev, fctx->engine);
163 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
164
165 if (chan->user) {
166 iounmap(chan->user);
167 chan->user = NULL;
168 }
169
170 chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
171 kfree(fctx);
172}
173
/* Bring up the PFIFO engine: reset it, enable the available subfifo units,
 * program the user-area BAR window, unmask interrupts, and re-register any
 * channels that already have a FIFO context (e.g. after resume).
 */
static int
nve0_fifo_init(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
	struct nve0_fifo_chan *fctx;
	int i;

	/* reset PFIFO, enable all available PSUBFIFO areas */
	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(dev, 0x000204, 0xffffffff);

	/* 0x204 readback reports which subfifo units actually exist */
	priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
	NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);

	/* PSUBFIFO[n]: clear pending interrupts and enable interrupt sources */
	for (i = 0; i < priv->spoon_nr; i++) {
		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
	}

	/* point the hardware at the per-channel user area in BAR1 */
	nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

	/* ack and enable top-level PFIFO interrupts */
	nv_wr32(dev, 0x002a00, 0xffffffff);
	nv_wr32(dev, 0x002100, 0xffffffff);
	nv_wr32(dev, 0x002140, 0xbfffffff);

	/* restore PFIFO context table */
	for (i = 0; i < priv->base.channels; i++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
		if (!chan || !(fctx = chan->engctx[engine]))
			continue;

		nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
					 (chan->ramin->vinst >> 12));
		nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
		nve0_fifo_playlist_update(dev, fctx->engine);
		nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
	}

	return 0;
}
218
/* Quiesce PFIFO before suspend/unload: kick every active channel off the
 * hardware and mask all PFIFO interrupts.  Returns -EBUSY if a channel
 * refuses to be kicked.
 */
static int
nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
	int i;

	for (i = 0; i < priv->base.channels; i++) {
		/* skip channels that aren't enabled */
		if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
			continue;

		nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
		nv_wr32(dev, 0x002634, i);
		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
				i, nv_rd32(dev, 0x002634));
			return -EBUSY;
		}
	}

	/* mask all PFIFO interrupts */
	nv_wr32(dev, 0x002140, 0x00000000);
	return 0;
}
241
/* VM fault source units; not yet known for Kepler, so the table is empty
 * and nouveau_enum_print() will fall back to printing the raw value. */
struct nouveau_enum nve0_fifo_fault_unit[] = {
	{}
};
245
/* Human-readable names for the low 4 bits of the VM fault status word. */
struct nouveau_enum nve0_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};
258
/* HUB client ids for VM faults; unknown on Kepler, raw value is printed. */
struct nouveau_enum nve0_fifo_fault_hubclient[] = {
	{}
};
262
/* GPC client ids for VM faults; unknown on Kepler, raw value is printed. */
struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
	{}
};
266
/* Decoded bits of the per-subfifo interrupt status register (0x040108). */
struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};
272
/* Decode and log a VM fault reported by unit @unit.  Reads the per-unit
 * fault info registers (instance, low/high virtual address, status) and
 * prints a one-line description; purely informational, no recovery.
 */
static void
nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
{
	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
	u32 client = (stat & 0x00001f00) >> 8;

	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
	nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
	printk("] from ");
	nouveau_enum_print(nve0_fifo_fault_unit, unit);
	/* bit 6 distinguishes HUB clients from GPC clients */
	if (stat & 0x00000040) {
		printk("/");
		nouveau_enum_print(nve0_fifo_fault_hubclient, client);
	} else {
		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
		nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
	}
	printk(" on channel 0x%010llx\n", (u64)inst << 12);
}
296
297static int
298nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
299{
300 struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
301 struct drm_nouveau_private *dev_priv = dev->dev_private;
302 struct nouveau_channel *chan = NULL;
303 unsigned long flags;
304 int ret = -EINVAL;
305
306 spin_lock_irqsave(&dev_priv->channels.lock, flags);
307 if (likely(chid >= 0 && chid < priv->base.channels)) {
308 chan = dev_priv->channels.ptr[chid];
309 if (likely(chan))
310 ret = nouveau_finish_page_flip(chan, NULL);
311 }
312 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
313 return ret;
314}
315
316static void
317nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
318{
319 u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
320 u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
321 u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
322 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
323 u32 subc = (addr & 0x00070000);
324 u32 mthd = (addr & 0x00003ffc);
325 u32 show = stat;
326
327 if (stat & 0x00200000) {
328 if (mthd == 0x0054) {
329 if (!nve0_fifo_page_flip(dev, chid))
330 show &= ~0x00200000;
331 }
332 }
333
334 if (show) {
335 NV_INFO(dev, "PFIFO%d:", unit);
336 nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
337 NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
338 unit, chid, subc, mthd, data);
339 }
340
341 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
342 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
343}
344
/* Top-level PFIFO interrupt handler.  Each recognised status bit is
 * handled and cleared from @stat; any leftover bits are logged once and
 * then masked off so an unknown source cannot storm the CPU.
 */
static void
nve0_fifo_isr(struct drm_device *dev)
{
	u32 stat = nv_rd32(dev, 0x002100);

	if (stat & 0x00000100) {
		NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
		nv_wr32(dev, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	/* bit 28: VM fault; 0x259c is a bitmask of faulting units */
	if (stat & 0x10000000) {
		u32 units = nv_rd32(dev, 0x00259c);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nve0_fifo_isr_vm_fault(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x00259c, units);
		stat &= ~0x10000000;
	}

	/* bit 29: subfifo interrupt; 0x25a0 is a bitmask of units */
	if (stat & 0x20000000) {
		u32 units = nv_rd32(dev, 0x0025a0);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nve0_fifo_isr_subfifo_intr(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x0025a0, units);
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
		nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
		stat &= ~0x40000000;
	}

	if (stat) {
		/* unknown source: log, ack, and mask everything to stop
		 * an interrupt storm */
		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
		nv_wr32(dev, 0x002100, stat);
		nv_wr32(dev, 0x002140, 0);
	}
}
396
/* Release all PFIFO engine resources: the BAR1 user window, the user
 * memory object, both playlist buffers per engine, and the priv struct.
 * Also called from the nve0_fifo_create() error path, so every release
 * here must tolerate objects that were never allocated.
 */
static void
nve0_fifo_destroy(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nve0_fifo_priv *priv = nv_engine(dev, engine);
	int i;

	nouveau_vm_put(&priv->user.bar);
	nouveau_gpuobj_ref(NULL, &priv->user.mem);

	/* playlists are double-buffered, drop both copies */
	for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
	}

	dev_priv->eng[engine] = NULL;
	kfree(priv);
}
415
/* Construct the PFIFO engine object: allocate the per-channel user-area
 * memory, map it through BAR1, and hook the interrupt handler.  On any
 * failure the engine's own destroy() callback unwinds partial state.
 *
 * Returns 0 on success or a negative errno.
 */
int
nve0_fifo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nve0_fifo_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.base.destroy = nve0_fifo_destroy;
	priv->base.base.init = nve0_fifo_init;
	priv->base.base.fini = nve0_fifo_fini;
	priv->base.base.context_new = nve0_fifo_context_new;
	priv->base.base.context_del = nve0_fifo_context_del;
	priv->base.channels = 4096;
	dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;

	/* 512 bytes of user-visible control area per channel */
	ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
	if (ret)
		goto error;

	ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
			     12, NV_MEM_ACCESS_RW, &priv->user.bar);
	if (ret)
		goto error;

	nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);

	nouveau_irq_register(dev, 8, nve0_fifo_isr);
error:
	/* success falls through here with ret == 0 */
	if (ret)
		priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
	return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.c b/drivers/gpu/drm/nouveau/nve0_graph.c
deleted file mode 100644
index 8a8051b68f10..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_graph.c
+++ /dev/null
@@ -1,831 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include <linux/module.h>
27
28#include "drmP.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_mm.h"
32#include "nouveau_fifo.h"
33
34#include "nve0_graph.h"
35
36static void
37nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
38{
39 NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
40 nv_rd32(dev, base + 0x400));
41 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
42 nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
43 nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
44 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
45 nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
46 nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
47}
48
49static void
50nve0_graph_ctxctl_debug(struct drm_device *dev)
51{
52 u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
53 u32 gpc;
54
55 nve0_graph_ctxctl_debug_unit(dev, 0x409000);
56 for (gpc = 0; gpc < gpcnr; gpc++)
57 nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
58}
59
/* Ask the CTXCTL firmware to bind @chan's instance as the current PGRAPH
 * context (method 0x03 via the 0x409504 mailbox).  Logs a timeout but
 * always returns 0 — callers do not treat the bind as fallible.
 */
static int
nve0_graph_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;

	nv_wr32(dev, 0x409840, 0x00000030);
	nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
	nv_wr32(dev, 0x409504, 0x00000003);
	if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
		NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");

	return 0;
}
73
/* Ask the CTXCTL firmware to save the current PGRAPH context into the
 * instance memory at physical address @chan (method 0x09).  Returns
 * -EBUSY if the firmware does not acknowledge completion.
 */
static int
nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
{
	nv_wr32(dev, 0x409840, 0x00000003);
	nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
	nv_wr32(dev, 0x409504, 0x00000009);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
		return -EBUSY;
	}

	return 0;
}
87
/* Build the "golden" context image used to initialise every subsequent
 * channel: load @chan's context, run the context generator, unload the
 * result, and snapshot it into priv->grctx_vals.  Done only once, for
 * the first channel created.
 *
 * Returns 0 on success; on failure the snapshot buffer is freed.
 */
static int
nve0_graph_construct_context(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	int ret, i;
	u32 *ctx;

	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	nve0_graph_load_context(chan);

	/* mark the context image as initialised before generation */
	nv_wo32(grch->grctx, 0x1c, 1);
	nv_wo32(grch->grctx, 0x20, 0);
	nv_wo32(grch->grctx, 0x28, 0);
	nv_wo32(grch->grctx, 0x2c, 0);
	dev_priv->engine.instmem.flush(dev);

	ret = nve0_grctx_generate(chan);
	if (ret)
		goto err;

	ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
	if (ret)
		goto err;

	/* copy the generated image out of instance memory, word by word */
	for (i = 0; i < priv->grctx_size; i += 4)
		ctx[i / 4] = nv_ro32(grch->grctx, i);

	priv->grctx_vals = ctx;
	return 0;

err:
	kfree(ctx);
	return ret;
}
128
/* Allocate the per-channel PGRAPH buffers and build the mmio list the
 * CTXCTL firmware replays on channel switch (register/value pairs stored
 * in grch->mmio).  On failure the partially-allocated objects are freed
 * by the caller's error path via context_del.
 *
 * Returns 0 on success or a negative errno from object allocation.
 */
static int
nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
{
	struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
	struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
	struct drm_device *dev = chan->dev;
	u32 magic[GPC_MAX][2];
	u16 offset = 0x0000;
	int gpc;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
				 &grch->unk408004);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
				 &grch->unk40800c);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
				 &grch->unk418810);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
				 &grch->mmio);
	if (ret)
		return ret;

/* append one (register, value) pair to the channel's mmio list */
#define mmio(r,v) do {                                      \
	nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r));  \
	nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v));  \
	grch->mmio_nr++;                                    \
} while (0)
	mmio(0x40800c, grch->unk40800c->linst >> 8);
	mmio(0x408010, 0x80000000);
	mmio(0x419004, grch->unk40800c->linst >> 8);
	mmio(0x419008, 0x00000000);
	mmio(0x4064cc, 0x80000000);
	mmio(0x408004, grch->unk408004->linst >> 8);
	mmio(0x408008, 0x80000030);
	mmio(0x418808, grch->unk408004->linst >> 8);
	mmio(0x41880c, 0x80000030);
	mmio(0x4064c8, 0x01800600);
	mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
	mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
	mmio(0x405830, 0x02180648);
	mmio(0x4064c4, 0x0192ffff);

	/* per-GPC values scale with that GPC's TPC count */
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
		u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
		magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
		magic[gpc][1] = 0x00000000 | (magic1 << 16);
		offset += 0x0324 * priv->tpc_nr[gpc];
	}

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
		mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
		offset += 0x07ff * priv->tpc_nr[gpc];
	}

	mmio(0x17e91c, 0x06060609);
	mmio(0x17e920, 0x00090a05);
#undef mmio
	return 0;
}
200
/* Create a PGRAPH context for @chan: allocate the context image and the
 * per-channel buffers, construct the golden context on first use, then
 * seed the new image from the golden snapshot and patch in the channel's
 * own mmio list pointers.
 *
 * Returns 0 on success; on error context_del() unwinds partial state.
 */
static int
nve0_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nve0_graph_priv *priv = nv_engine(dev, engine);
	struct nve0_graph_chan *grch;
	struct nouveau_gpuobj *grctx;
	int ret, i;

	grch = kzalloc(sizeof(*grch), GFP_KERNEL);
	if (!grch)
		return -ENOMEM;
	chan->engctx[NVOBJ_ENGINE_GR] = grch;

	ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
				 &grch->grctx);
	if (ret)
		goto error;
	grctx = grch->grctx;

	ret = nve0_graph_create_context_mmio_list(chan);
	if (ret)
		goto error;

	/* point the channel's RAMIN at the context image (bit 2 = valid) */
	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
	pinstmem->flush(dev);

	/* first channel ever: build the golden context snapshot */
	if (!priv->grctx_vals) {
		ret = nve0_graph_construct_context(chan);
		if (ret)
			goto error;
	}

	for (i = 0; i < priv->grctx_size; i += 4)
		nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
	/* patch in this channel's mmio list and reset the counters */
	nv_wo32(grctx, 0xf4, 0);
	nv_wo32(grctx, 0xf8, 0);
	nv_wo32(grctx, 0x10, grch->mmio_nr);
	nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
	nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
	nv_wo32(grctx, 0x1c, 1);
	nv_wo32(grctx, 0x20, 0);
	nv_wo32(grctx, 0x28, 0);
	nv_wo32(grctx, 0x2c, 0);

	pinstmem->flush(dev);
	return 0;

error:
	priv->base.context_del(chan, engine);
	return ret;
}
257
258static void
259nve0_graph_context_del(struct nouveau_channel *chan, int engine)
260{
261 struct nve0_graph_chan *grch = chan->engctx[engine];
262
263 nouveau_gpuobj_ref(NULL, &grch->mmio);
264 nouveau_gpuobj_ref(NULL, &grch->unk418810);
265 nouveau_gpuobj_ref(NULL, &grch->unk40800c);
266 nouveau_gpuobj_ref(NULL, &grch->unk408004);
267 nouveau_gpuobj_ref(NULL, &grch->grctx);
268 chan->engctx[engine] = NULL;
269}
270
271static int
272nve0_graph_object_new(struct nouveau_channel *chan, int engine,
273 u32 handle, u16 class)
274{
275 return 0;
276}
277
278static int
279nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
280{
281 return 0;
282}
283
/* Program the GPC-broadcast 0x4188xx block, pointing the hardware at the
 * two scratch objects allocated in nve0_graph_create(). */
static void
nve0_graph_init_obj418880(struct drm_device *dev)
{
	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int i;

	nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
	nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
	nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
}
297
/* Write the fixed PGRAPH core register defaults; values come from traces
 * of the binary driver. */
static void
nve0_graph_init_regs(struct drm_device *dev)
{
	nv_wr32(dev, 0x400080, 0x003083c2);
	nv_wr32(dev, 0x400088, 0x0001ffe7);
	nv_wr32(dev, 0x40008c, 0x00000000);
	nv_wr32(dev, 0x400090, 0x00000030);
	nv_wr32(dev, 0x40013c, 0x003901f7);
	nv_wr32(dev, 0x400140, 0x00000100);
	nv_wr32(dev, 0x400144, 0x00000000);
	nv_wr32(dev, 0x400148, 0x00000110);
	nv_wr32(dev, 0x400138, 0x00000000);
	nv_wr32(dev, 0x400130, 0x00000000);
	nv_wr32(dev, 0x400134, 0x00000000);
	nv_wr32(dev, 0x400124, 0x00000002);
}
314
/* Initialise the PGRAPH sub-units: set up CTXCTL defaults and enable
 * error reporting (0xc0000000) in each unit's control register. */
static void
nve0_graph_init_units(struct drm_device *dev)
{
	nv_wr32(dev, 0x409ffc, 0x00000000);
	nv_wr32(dev, 0x409c14, 0x00003e3e);
	nv_wr32(dev, 0x409c24, 0x000f0000);

	/* enable trap/error reporting in the individual units */
	nv_wr32(dev, 0x404000, 0xc0000000);
	nv_wr32(dev, 0x404600, 0xc0000000);
	nv_wr32(dev, 0x408030, 0xc0000000);
	nv_wr32(dev, 0x404490, 0xc0000000);
	nv_wr32(dev, 0x406018, 0xc0000000);
	nv_wr32(dev, 0x407020, 0xc0000000);
	nv_wr32(dev, 0x405840, 0xc0000000);
	nv_wr32(dev, 0x405844, 0x00ffffff);

	nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
	nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);

}
335
/* First-stage GPC setup: distribute TPC indices round-robin across the
 * GPCs and program the per-GPC TPC configuration registers. */
static void
nve0_graph_init_gpc_0(struct drm_device *dev)
{
	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
	u32 data[TPC_MAX / 8];
	u8 tpcnr[GPC_MAX];
	int i, gpc, tpc;

	nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);

	/* assign each global TPC slot to a GPC, round-robin, skipping
	 * GPCs that have run out of TPCs; pack 8 nibbles per word */
	memset(data, 0x00, sizeof(data));
	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % priv->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
	nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
	nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
	nv_wr32(dev, GPC_BCAST(0x098c), data[3]);

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
						    priv->tpc_nr[gpc]);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
	nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
}
373
/* Second-stage GPC setup: enable error reporting and unmask interrupts
 * for every GPC and every TPC within it. */
static void
nve0_graph_init_gpc_1(struct drm_device *dev)
{
	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int gpc, tpc;

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
			nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
		}
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}
}
399
/* Enable error reporting and unmask interrupts on every ROP unit. */
static void
nve0_graph_init_rop(struct drm_device *dev)
{
	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	int rop;

	for (rop = 0; rop < priv->rop_nr; rop++) {
		nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
		nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
		nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
		nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
	}
}
413
/* Upload one falcon microcode pair (data segment then code segment) to
 * the engine whose register block starts at @fuc_base.  The code upload
 * auto-increments within a 64-word page; the page index must be written
 * explicitly at each page boundary.
 */
static void
nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
		    struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
{
	int i;

	/* data segment, auto-incrementing port */
	nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
	for (i = 0; i < data->size / 4; i++)
		nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);

	/* code segment, one 64-word page at a time */
	nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
	for (i = 0; i < code->size / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, fuc_base + 0x0188, i >> 6);
		nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
	}
}
431
/* Upload and boot the HUB (0x409000) and GPC (0x41a000) CTXCTL falcons,
 * then run the firmware's startup handshake over the 0x409500/0x409504
 * mailbox.  Each mailbox request waits for 0x409800 to go non-zero;
 * request 0x10 additionally returns the context image size.
 *
 * Returns 0 on success, -EBUSY on any handshake timeout.
 */
static int
nve0_graph_init_ctxctl(struct drm_device *dev)
{
	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	u32 r000260;

	/* load fuc microcode (with fetch auto-disable temporarily off) */
	r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
	nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
	nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
	nv_wr32(dev, 0x000260, r000260);

	/* start both of them running */
	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x41a10c, 0x00000000);
	nv_wr32(dev, 0x40910c, 0x00000000);
	nv_wr32(dev, 0x41a100, 0x00000002);
	nv_wr32(dev, 0x409100, 0x00000002);
	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
		NV_INFO(dev, "0x409800 wait failed\n");

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x7fffffff);
	nv_wr32(dev, 0x409504, 0x00000021);

	/* request 0x10: query the size of the context image */
	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000010);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
		return -EBUSY;
	}
	priv->grctx_size = nv_rd32(dev, 0x409800);

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000016);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
		return -EBUSY;
	}

	nv_wr32(dev, 0x409840, 0xffffffff);
	nv_wr32(dev, 0x409500, 0x00000000);
	nv_wr32(dev, 0x409504, 0x00000025);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
		return -EBUSY;
	}

	nv_wr32(dev, 0x409800, 0x00000000);
	nv_wr32(dev, 0x409500, 0x00000001);
	nv_wr32(dev, 0x409504, 0x00000030);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
		return -EBUSY;
	}

	/* requests 0x31/0x32 take an argument via 0x409810 */
	nv_wr32(dev, 0x409810, 0xb00095c8);
	nv_wr32(dev, 0x409800, 0x00000000);
	nv_wr32(dev, 0x409500, 0x00000001);
	nv_wr32(dev, 0x409504, 0x00000031);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
		return -EBUSY;
	}

	nv_wr32(dev, 0x409810, 0x00080420);
	nv_wr32(dev, 0x409800, 0x00000000);
	nv_wr32(dev, 0x409500, 0x00000001);
	nv_wr32(dev, 0x409504, 0x00000032);
	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
		NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
		return -EBUSY;
	}

	nv_wr32(dev, 0x409614, 0x00000070);
	nv_wr32(dev, 0x409614, 0x00000770);
	nv_wr32(dev, 0x40802c, 0x00000001);
	return 0;
}
513
/* Bring up PGRAPH: reset the engine, run the per-block init helpers in
 * hardware order, clear all pending interrupt/error state, and finally
 * boot the CTXCTL firmware.
 *
 * Returns 0 on success or the error from nve0_graph_init_ctxctl().
 */
static int
nve0_graph_init(struct drm_device *dev, int engine)
{
	/* reset PGRAPH and its dependent units */
	nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
	nv_mask(dev, 0x000200, 0x18001000, 0x18001000);

	nve0_graph_init_obj418880(dev);
	nve0_graph_init_regs(dev);
	nve0_graph_init_gpc_0(dev);

	nv_wr32(dev, 0x400500, 0x00010001);
	nv_wr32(dev, 0x400100, 0xffffffff);
	nv_wr32(dev, 0x40013c, 0xffffffff);

	nve0_graph_init_units(dev);
	nve0_graph_init_gpc_1(dev);
	nve0_graph_init_rop(dev);

	/* ack any stale interrupt/trap state and unmask everything */
	nv_wr32(dev, 0x400108, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, 0x400118, 0xffffffff);
	nv_wr32(dev, 0x400130, 0xffffffff);
	nv_wr32(dev, 0x40011c, 0xffffffff);
	nv_wr32(dev, 0x400134, 0xffffffff);
	nv_wr32(dev, 0x400054, 0x34ce3464);

	/* tail-call: the ctxctl result is this function's result, so the
	 * redundant ret/if/return dance is unnecessary */
	return nve0_graph_init_ctxctl(dev);
}
548
/* Map a context instance address back to its channel id by scanning the
 * channel table under its lock.  Returns the matching channel id, or
 * pfifo->channels (one past the last valid id) when nothing matches.
 */
int
nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
{
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (i = 0; i < pfifo->channels; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->ramin)
			continue;

		if (inst == chan->ramin->vinst)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return i;
}
570
/* Handle a CTXCTL firmware interrupt: log the known error conditions,
 * dump the falcon debug state, and ack the interrupt. */
static void
nve0_graph_ctxctl_isr(struct drm_device *dev)
{
	u32 ustat = nv_rd32(dev, 0x409c18);

	if (ustat & 0x00000001)
		NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
	if (ustat & 0x00080000)
		NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
	if (ustat & ~0x00080001)
		NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);

	nve0_graph_ctxctl_debug(dev);
	nv_wr32(dev, 0x409c20, ustat);
}
586
/* Handle PGRAPH TRAP interrupts for channel @chid: log and reset each
 * known trap source (dispatch, shader, ROP) and ack the trap bits;
 * unknown bits are logged and acked without further handling.
 */
static void
nve0_graph_trap_isr(struct drm_device *dev, int chid)
{
	struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
	u32 trap = nv_rd32(dev, 0x400108);
	int rop;

	if (trap & 0x00000001) {
		u32 stat = nv_rd32(dev, 0x404000);
		NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
		nv_wr32(dev, 0x404000, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x00000001);
		trap &= ~0x00000001;
	}

	if (trap & 0x00000010) {
		u32 stat = nv_rd32(dev, 0x405840);
		NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
		nv_wr32(dev, 0x405840, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x00000010);
		trap &= ~0x00000010;
	}

	if (trap & 0x02000000) {
		/* ROP trap: each unit reports Z and colour status */
		for (rop = 0; rop < priv->rop_nr; rop++) {
			u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
			u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
			NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
				rop, chid, statz, statc);
			nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
			nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
		}
		nv_wr32(dev, 0x400108, 0x02000000);
		trap &= ~0x02000000;
	}

	if (trap) {
		NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
		nv_wr32(dev, 0x400108, trap);
	}
}
628
/* Top-level PGRAPH interrupt handler.  Decodes the faulting method from
 * the trap registers, dispatches each known status bit, acks it, and
 * finally re-enables method processing via 0x400500.
 */
static void
nve0_graph_isr(struct drm_device *dev)
{
	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
	u32 chid = nve0_graph_isr_chid(dev, inst);
	u32 stat = nv_rd32(dev, 0x400100);
	u32 addr = nv_rd32(dev, 0x400704);
	u32 mthd = (addr & 0x00003ffc);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 data = nv_rd32(dev, 0x400708);
	u32 code = nv_rd32(dev, 0x400110);
	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));

	/* ILLEGAL_MTHD: give software methods a chance to handle it */
	if (stat & 0x00000010) {
		if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
			NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
				     "subc %d class 0x%04x mthd 0x%04x "
				     "data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
		}
		nv_wr32(dev, 0x400100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000020) {
		NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
			chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00000020);
		stat &= ~0x00000020;
	}

	if (stat & 0x00100000) {
		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
		nouveau_enum_print(nv50_data_error_names, code);
		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
		       "mthd 0x%04x data 0x%08x\n",
		       chid, inst, subc, class, mthd, data);
		nv_wr32(dev, 0x400100, 0x00100000);
		stat &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		nve0_graph_trap_isr(dev, chid);
		nv_wr32(dev, 0x400100, 0x00200000);
		stat &= ~0x00200000;
	}

	if (stat & 0x00080000) {
		nve0_graph_ctxctl_isr(dev);
		nv_wr32(dev, 0x400100, 0x00080000);
		stat &= ~0x00080000;
	}

	if (stat) {
		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
		nv_wr32(dev, 0x400100, stat);
	}

	/* re-enable method processing */
	nv_wr32(dev, 0x400500, 0x00010001);
}
690
691static int
692nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
693 struct nve0_graph_fuc *fuc)
694{
695 struct drm_nouveau_private *dev_priv = dev->dev_private;
696 const struct firmware *fw;
697 char f[32];
698 int ret;
699
700 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
701 ret = request_firmware(&fw, f, &dev->pdev->dev);
702 if (ret)
703 return ret;
704
705 fuc->size = fw->size;
706 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
707 release_firmware(fw);
708 return (fuc->data != NULL) ? 0 : -ENOMEM;
709}
710
711static void
712nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
713{
714 if (fuc->data) {
715 kfree(fuc->data);
716 fuc->data = NULL;
717 }
718}
719
720static void
721nve0_graph_destroy(struct drm_device *dev, int engine)
722{
723 struct nve0_graph_priv *priv = nv_engine(dev, engine);
724
725 nve0_graph_destroy_fw(&priv->fuc409c);
726 nve0_graph_destroy_fw(&priv->fuc409d);
727 nve0_graph_destroy_fw(&priv->fuc41ac);
728 nve0_graph_destroy_fw(&priv->fuc41ad);
729
730 nouveau_irq_unregister(dev, 12);
731
732 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
733 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
734
735 if (priv->grctx_vals)
736 kfree(priv->grctx_vals);
737
738 NVOBJ_ENGINE_DEL(dev, GR);
739 kfree(priv);
740}
741
/* Construct the PGRAPH engine object: register the engine and ISR, load
 * the four CTXCTL firmware images, allocate scratch objects, probe the
 * GPC/TPC/ROP topology, and register the supported object classes.
 *
 * Returns 0 on success or a negative errno.  NOTE(review): when
 * firmware loading fails, ret is deliberately set to 0 before jumping
 * to the error path, so the engine is torn down but the function still
 * reports success — presumably to let the driver load without
 * acceleration; confirm against callers before changing.
 */
int
nve0_graph_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nve0_graph_priv *priv;
	int ret, gpc, i;
	u32 kepler;

	kepler = nve0_graph_class(dev);
	if (!kepler) {
		NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.destroy = nve0_graph_destroy;
	priv->base.init = nve0_graph_init;
	priv->base.fini = nve0_graph_fini;
	priv->base.context_new = nve0_graph_context_new;
	priv->base.context_del = nve0_graph_context_del;
	priv->base.object_new = nve0_graph_object_new;

	NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
	nouveau_irq_register(dev, 12, nve0_graph_isr);

	/* Kepler has no built-in ucode in nouveau yet; require external
	 * firmware for all four falcon images */
	NV_INFO(dev, "PGRAPH: using external firmware\n");
	if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
	    nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
	    nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
	    nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
		ret = 0;
		goto error;
	}

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
	if (ret)
		goto error;

	for (i = 0; i < 0x1000; i += 4) {
		nv_wo32(priv->unk4188b4, i, 0x00000010);
		nv_wo32(priv->unk4188b8, i, 0x00000010);
	}

	/* probe topology: GPC count, ROP count, TPCs per GPC */
	priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
	priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
		priv->tpc_total += priv->tpc_nr[gpc];
	}

	/* chipset-specific fudge value used by init_gpc_0 */
	switch (dev_priv->chipset) {
	case 0xe4:
		if (priv->tpc_total == 8)
			priv->magic_not_rop_nr = 3;
		else
		if (priv->tpc_total == 7)
			priv->magic_not_rop_nr = 1;
		break;
	case 0xe7:
		priv->magic_not_rop_nr = 1;
		break;
	default:
		break;
	}

	if (!priv->magic_not_rop_nr) {
		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
			 priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
			 priv->tpc_nr[3], priv->rop_nr);
		priv->magic_not_rop_nr = 0x00;
	}

	NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
	NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
	NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
	NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
	NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
	return 0;

error:
	nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
	return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.h b/drivers/gpu/drm/nouveau/nve0_graph.h
deleted file mode 100644
index 2ba70449ba01..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_graph.h
+++ /dev/null
@@ -1,89 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NVE0_GRAPH_H__
26#define __NVE0_GRAPH_H__
27
28#define GPC_MAX 4
29#define TPC_MAX 32
30
31#define ROP_BCAST(r) (0x408800 + (r))
32#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
33#define GPC_BCAST(r) (0x418000 + (r))
34#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
35#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
36
37struct nve0_graph_fuc {
38 u32 *data;
39 u32 size;
40};
41
42struct nve0_graph_priv {
43 struct nouveau_exec_engine base;
44
45 struct nve0_graph_fuc fuc409c;
46 struct nve0_graph_fuc fuc409d;
47 struct nve0_graph_fuc fuc41ac;
48 struct nve0_graph_fuc fuc41ad;
49
50 u8 gpc_nr;
51 u8 rop_nr;
52 u8 tpc_nr[GPC_MAX];
53 u8 tpc_total;
54
55 u32 grctx_size;
56 u32 *grctx_vals;
57 struct nouveau_gpuobj *unk4188b4;
58 struct nouveau_gpuobj *unk4188b8;
59
60 u8 magic_not_rop_nr;
61};
62
63struct nve0_graph_chan {
64 struct nouveau_gpuobj *grctx;
65 struct nouveau_gpuobj *unk408004; /* 0x418810 too */
66 struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
67 struct nouveau_gpuobj *unk418810; /* 0x419848 too */
68 struct nouveau_gpuobj *mmio;
69 int mmio_nr;
70};
71
72int nve0_grctx_generate(struct nouveau_channel *);
73
74/* nve0_graph.c uses this also to determine supported chipsets */
75static inline u32
76nve0_graph_class(struct drm_device *dev)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79
80 switch (dev_priv->chipset) {
81 case 0xe4:
82 case 0xe7:
83 return 0xa097;
84 default:
85 return 0;
86 }
87}
88
89#endif
diff --git a/drivers/gpu/drm/nouveau/nve0_grctx.c b/drivers/gpu/drm/nouveau/nve0_grctx.c
deleted file mode 100644
index d8cb360e92c1..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_grctx.c
+++ /dev/null
@@ -1,2777 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28#include "nve0_graph.h"
29
30static void
31nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
32{
33 nv_wr32(dev, 0x400204, data);
34 nv_wr32(dev, 0x400200, icmd);
35 while (nv_rd32(dev, 0x400700) & 0x00000002) {}
36}
37
38static void
39nve0_grctx_generate_icmd(struct drm_device *dev)
40{
41 nv_wr32(dev, 0x400208, 0x80000000);
42 nv_icmd(dev, 0x001000, 0x00000004);
43 nv_icmd(dev, 0x000039, 0x00000000);
44 nv_icmd(dev, 0x00003a, 0x00000000);
45 nv_icmd(dev, 0x00003b, 0x00000000);
46 nv_icmd(dev, 0x0000a9, 0x0000ffff);
47 nv_icmd(dev, 0x000038, 0x0fac6881);
48 nv_icmd(dev, 0x00003d, 0x00000001);
49 nv_icmd(dev, 0x0000e8, 0x00000400);
50 nv_icmd(dev, 0x0000e9, 0x00000400);
51 nv_icmd(dev, 0x0000ea, 0x00000400);
52 nv_icmd(dev, 0x0000eb, 0x00000400);
53 nv_icmd(dev, 0x0000ec, 0x00000400);
54 nv_icmd(dev, 0x0000ed, 0x00000400);
55 nv_icmd(dev, 0x0000ee, 0x00000400);
56 nv_icmd(dev, 0x0000ef, 0x00000400);
57 nv_icmd(dev, 0x000078, 0x00000300);
58 nv_icmd(dev, 0x000079, 0x00000300);
59 nv_icmd(dev, 0x00007a, 0x00000300);
60 nv_icmd(dev, 0x00007b, 0x00000300);
61 nv_icmd(dev, 0x00007c, 0x00000300);
62 nv_icmd(dev, 0x00007d, 0x00000300);
63 nv_icmd(dev, 0x00007e, 0x00000300);
64 nv_icmd(dev, 0x00007f, 0x00000300);
65 nv_icmd(dev, 0x000050, 0x00000011);
66 nv_icmd(dev, 0x000058, 0x00000008);
67 nv_icmd(dev, 0x000059, 0x00000008);
68 nv_icmd(dev, 0x00005a, 0x00000008);
69 nv_icmd(dev, 0x00005b, 0x00000008);
70 nv_icmd(dev, 0x00005c, 0x00000008);
71 nv_icmd(dev, 0x00005d, 0x00000008);
72 nv_icmd(dev, 0x00005e, 0x00000008);
73 nv_icmd(dev, 0x00005f, 0x00000008);
74 nv_icmd(dev, 0x000208, 0x00000001);
75 nv_icmd(dev, 0x000209, 0x00000001);
76 nv_icmd(dev, 0x00020a, 0x00000001);
77 nv_icmd(dev, 0x00020b, 0x00000001);
78 nv_icmd(dev, 0x00020c, 0x00000001);
79 nv_icmd(dev, 0x00020d, 0x00000001);
80 nv_icmd(dev, 0x00020e, 0x00000001);
81 nv_icmd(dev, 0x00020f, 0x00000001);
82 nv_icmd(dev, 0x000081, 0x00000001);
83 nv_icmd(dev, 0x000085, 0x00000004);
84 nv_icmd(dev, 0x000088, 0x00000400);
85 nv_icmd(dev, 0x000090, 0x00000300);
86 nv_icmd(dev, 0x000098, 0x00001001);
87 nv_icmd(dev, 0x0000e3, 0x00000001);
88 nv_icmd(dev, 0x0000da, 0x00000001);
89 nv_icmd(dev, 0x0000f8, 0x00000003);
90 nv_icmd(dev, 0x0000fa, 0x00000001);
91 nv_icmd(dev, 0x00009f, 0x0000ffff);
92 nv_icmd(dev, 0x0000a0, 0x0000ffff);
93 nv_icmd(dev, 0x0000a1, 0x0000ffff);
94 nv_icmd(dev, 0x0000a2, 0x0000ffff);
95 nv_icmd(dev, 0x0000b1, 0x00000001);
96 nv_icmd(dev, 0x0000ad, 0x0000013e);
97 nv_icmd(dev, 0x0000e1, 0x00000010);
98 nv_icmd(dev, 0x000290, 0x00000000);
99 nv_icmd(dev, 0x000291, 0x00000000);
100 nv_icmd(dev, 0x000292, 0x00000000);
101 nv_icmd(dev, 0x000293, 0x00000000);
102 nv_icmd(dev, 0x000294, 0x00000000);
103 nv_icmd(dev, 0x000295, 0x00000000);
104 nv_icmd(dev, 0x000296, 0x00000000);
105 nv_icmd(dev, 0x000297, 0x00000000);
106 nv_icmd(dev, 0x000298, 0x00000000);
107 nv_icmd(dev, 0x000299, 0x00000000);
108 nv_icmd(dev, 0x00029a, 0x00000000);
109 nv_icmd(dev, 0x00029b, 0x00000000);
110 nv_icmd(dev, 0x00029c, 0x00000000);
111 nv_icmd(dev, 0x00029d, 0x00000000);
112 nv_icmd(dev, 0x00029e, 0x00000000);
113 nv_icmd(dev, 0x00029f, 0x00000000);
114 nv_icmd(dev, 0x0003b0, 0x00000000);
115 nv_icmd(dev, 0x0003b1, 0x00000000);
116 nv_icmd(dev, 0x0003b2, 0x00000000);
117 nv_icmd(dev, 0x0003b3, 0x00000000);
118 nv_icmd(dev, 0x0003b4, 0x00000000);
119 nv_icmd(dev, 0x0003b5, 0x00000000);
120 nv_icmd(dev, 0x0003b6, 0x00000000);
121 nv_icmd(dev, 0x0003b7, 0x00000000);
122 nv_icmd(dev, 0x0003b8, 0x00000000);
123 nv_icmd(dev, 0x0003b9, 0x00000000);
124 nv_icmd(dev, 0x0003ba, 0x00000000);
125 nv_icmd(dev, 0x0003bb, 0x00000000);
126 nv_icmd(dev, 0x0003bc, 0x00000000);
127 nv_icmd(dev, 0x0003bd, 0x00000000);
128 nv_icmd(dev, 0x0003be, 0x00000000);
129 nv_icmd(dev, 0x0003bf, 0x00000000);
130 nv_icmd(dev, 0x0002a0, 0x00000000);
131 nv_icmd(dev, 0x0002a1, 0x00000000);
132 nv_icmd(dev, 0x0002a2, 0x00000000);
133 nv_icmd(dev, 0x0002a3, 0x00000000);
134 nv_icmd(dev, 0x0002a4, 0x00000000);
135 nv_icmd(dev, 0x0002a5, 0x00000000);
136 nv_icmd(dev, 0x0002a6, 0x00000000);
137 nv_icmd(dev, 0x0002a7, 0x00000000);
138 nv_icmd(dev, 0x0002a8, 0x00000000);
139 nv_icmd(dev, 0x0002a9, 0x00000000);
140 nv_icmd(dev, 0x0002aa, 0x00000000);
141 nv_icmd(dev, 0x0002ab, 0x00000000);
142 nv_icmd(dev, 0x0002ac, 0x00000000);
143 nv_icmd(dev, 0x0002ad, 0x00000000);
144 nv_icmd(dev, 0x0002ae, 0x00000000);
145 nv_icmd(dev, 0x0002af, 0x00000000);
146 nv_icmd(dev, 0x000420, 0x00000000);
147 nv_icmd(dev, 0x000421, 0x00000000);
148 nv_icmd(dev, 0x000422, 0x00000000);
149 nv_icmd(dev, 0x000423, 0x00000000);
150 nv_icmd(dev, 0x000424, 0x00000000);
151 nv_icmd(dev, 0x000425, 0x00000000);
152 nv_icmd(dev, 0x000426, 0x00000000);
153 nv_icmd(dev, 0x000427, 0x00000000);
154 nv_icmd(dev, 0x000428, 0x00000000);
155 nv_icmd(dev, 0x000429, 0x00000000);
156 nv_icmd(dev, 0x00042a, 0x00000000);
157 nv_icmd(dev, 0x00042b, 0x00000000);
158 nv_icmd(dev, 0x00042c, 0x00000000);
159 nv_icmd(dev, 0x00042d, 0x00000000);
160 nv_icmd(dev, 0x00042e, 0x00000000);
161 nv_icmd(dev, 0x00042f, 0x00000000);
162 nv_icmd(dev, 0x0002b0, 0x00000000);
163 nv_icmd(dev, 0x0002b1, 0x00000000);
164 nv_icmd(dev, 0x0002b2, 0x00000000);
165 nv_icmd(dev, 0x0002b3, 0x00000000);
166 nv_icmd(dev, 0x0002b4, 0x00000000);
167 nv_icmd(dev, 0x0002b5, 0x00000000);
168 nv_icmd(dev, 0x0002b6, 0x00000000);
169 nv_icmd(dev, 0x0002b7, 0x00000000);
170 nv_icmd(dev, 0x0002b8, 0x00000000);
171 nv_icmd(dev, 0x0002b9, 0x00000000);
172 nv_icmd(dev, 0x0002ba, 0x00000000);
173 nv_icmd(dev, 0x0002bb, 0x00000000);
174 nv_icmd(dev, 0x0002bc, 0x00000000);
175 nv_icmd(dev, 0x0002bd, 0x00000000);
176 nv_icmd(dev, 0x0002be, 0x00000000);
177 nv_icmd(dev, 0x0002bf, 0x00000000);
178 nv_icmd(dev, 0x000430, 0x00000000);
179 nv_icmd(dev, 0x000431, 0x00000000);
180 nv_icmd(dev, 0x000432, 0x00000000);
181 nv_icmd(dev, 0x000433, 0x00000000);
182 nv_icmd(dev, 0x000434, 0x00000000);
183 nv_icmd(dev, 0x000435, 0x00000000);
184 nv_icmd(dev, 0x000436, 0x00000000);
185 nv_icmd(dev, 0x000437, 0x00000000);
186 nv_icmd(dev, 0x000438, 0x00000000);
187 nv_icmd(dev, 0x000439, 0x00000000);
188 nv_icmd(dev, 0x00043a, 0x00000000);
189 nv_icmd(dev, 0x00043b, 0x00000000);
190 nv_icmd(dev, 0x00043c, 0x00000000);
191 nv_icmd(dev, 0x00043d, 0x00000000);
192 nv_icmd(dev, 0x00043e, 0x00000000);
193 nv_icmd(dev, 0x00043f, 0x00000000);
194 nv_icmd(dev, 0x0002c0, 0x00000000);
195 nv_icmd(dev, 0x0002c1, 0x00000000);
196 nv_icmd(dev, 0x0002c2, 0x00000000);
197 nv_icmd(dev, 0x0002c3, 0x00000000);
198 nv_icmd(dev, 0x0002c4, 0x00000000);
199 nv_icmd(dev, 0x0002c5, 0x00000000);
200 nv_icmd(dev, 0x0002c6, 0x00000000);
201 nv_icmd(dev, 0x0002c7, 0x00000000);
202 nv_icmd(dev, 0x0002c8, 0x00000000);
203 nv_icmd(dev, 0x0002c9, 0x00000000);
204 nv_icmd(dev, 0x0002ca, 0x00000000);
205 nv_icmd(dev, 0x0002cb, 0x00000000);
206 nv_icmd(dev, 0x0002cc, 0x00000000);
207 nv_icmd(dev, 0x0002cd, 0x00000000);
208 nv_icmd(dev, 0x0002ce, 0x00000000);
209 nv_icmd(dev, 0x0002cf, 0x00000000);
210 nv_icmd(dev, 0x0004d0, 0x00000000);
211 nv_icmd(dev, 0x0004d1, 0x00000000);
212 nv_icmd(dev, 0x0004d2, 0x00000000);
213 nv_icmd(dev, 0x0004d3, 0x00000000);
214 nv_icmd(dev, 0x0004d4, 0x00000000);
215 nv_icmd(dev, 0x0004d5, 0x00000000);
216 nv_icmd(dev, 0x0004d6, 0x00000000);
217 nv_icmd(dev, 0x0004d7, 0x00000000);
218 nv_icmd(dev, 0x0004d8, 0x00000000);
219 nv_icmd(dev, 0x0004d9, 0x00000000);
220 nv_icmd(dev, 0x0004da, 0x00000000);
221 nv_icmd(dev, 0x0004db, 0x00000000);
222 nv_icmd(dev, 0x0004dc, 0x00000000);
223 nv_icmd(dev, 0x0004dd, 0x00000000);
224 nv_icmd(dev, 0x0004de, 0x00000000);
225 nv_icmd(dev, 0x0004df, 0x00000000);
226 nv_icmd(dev, 0x000720, 0x00000000);
227 nv_icmd(dev, 0x000721, 0x00000000);
228 nv_icmd(dev, 0x000722, 0x00000000);
229 nv_icmd(dev, 0x000723, 0x00000000);
230 nv_icmd(dev, 0x000724, 0x00000000);
231 nv_icmd(dev, 0x000725, 0x00000000);
232 nv_icmd(dev, 0x000726, 0x00000000);
233 nv_icmd(dev, 0x000727, 0x00000000);
234 nv_icmd(dev, 0x000728, 0x00000000);
235 nv_icmd(dev, 0x000729, 0x00000000);
236 nv_icmd(dev, 0x00072a, 0x00000000);
237 nv_icmd(dev, 0x00072b, 0x00000000);
238 nv_icmd(dev, 0x00072c, 0x00000000);
239 nv_icmd(dev, 0x00072d, 0x00000000);
240 nv_icmd(dev, 0x00072e, 0x00000000);
241 nv_icmd(dev, 0x00072f, 0x00000000);
242 nv_icmd(dev, 0x0008c0, 0x00000000);
243 nv_icmd(dev, 0x0008c1, 0x00000000);
244 nv_icmd(dev, 0x0008c2, 0x00000000);
245 nv_icmd(dev, 0x0008c3, 0x00000000);
246 nv_icmd(dev, 0x0008c4, 0x00000000);
247 nv_icmd(dev, 0x0008c5, 0x00000000);
248 nv_icmd(dev, 0x0008c6, 0x00000000);
249 nv_icmd(dev, 0x0008c7, 0x00000000);
250 nv_icmd(dev, 0x0008c8, 0x00000000);
251 nv_icmd(dev, 0x0008c9, 0x00000000);
252 nv_icmd(dev, 0x0008ca, 0x00000000);
253 nv_icmd(dev, 0x0008cb, 0x00000000);
254 nv_icmd(dev, 0x0008cc, 0x00000000);
255 nv_icmd(dev, 0x0008cd, 0x00000000);
256 nv_icmd(dev, 0x0008ce, 0x00000000);
257 nv_icmd(dev, 0x0008cf, 0x00000000);
258 nv_icmd(dev, 0x000890, 0x00000000);
259 nv_icmd(dev, 0x000891, 0x00000000);
260 nv_icmd(dev, 0x000892, 0x00000000);
261 nv_icmd(dev, 0x000893, 0x00000000);
262 nv_icmd(dev, 0x000894, 0x00000000);
263 nv_icmd(dev, 0x000895, 0x00000000);
264 nv_icmd(dev, 0x000896, 0x00000000);
265 nv_icmd(dev, 0x000897, 0x00000000);
266 nv_icmd(dev, 0x000898, 0x00000000);
267 nv_icmd(dev, 0x000899, 0x00000000);
268 nv_icmd(dev, 0x00089a, 0x00000000);
269 nv_icmd(dev, 0x00089b, 0x00000000);
270 nv_icmd(dev, 0x00089c, 0x00000000);
271 nv_icmd(dev, 0x00089d, 0x00000000);
272 nv_icmd(dev, 0x00089e, 0x00000000);
273 nv_icmd(dev, 0x00089f, 0x00000000);
274 nv_icmd(dev, 0x0008e0, 0x00000000);
275 nv_icmd(dev, 0x0008e1, 0x00000000);
276 nv_icmd(dev, 0x0008e2, 0x00000000);
277 nv_icmd(dev, 0x0008e3, 0x00000000);
278 nv_icmd(dev, 0x0008e4, 0x00000000);
279 nv_icmd(dev, 0x0008e5, 0x00000000);
280 nv_icmd(dev, 0x0008e6, 0x00000000);
281 nv_icmd(dev, 0x0008e7, 0x00000000);
282 nv_icmd(dev, 0x0008e8, 0x00000000);
283 nv_icmd(dev, 0x0008e9, 0x00000000);
284 nv_icmd(dev, 0x0008ea, 0x00000000);
285 nv_icmd(dev, 0x0008eb, 0x00000000);
286 nv_icmd(dev, 0x0008ec, 0x00000000);
287 nv_icmd(dev, 0x0008ed, 0x00000000);
288 nv_icmd(dev, 0x0008ee, 0x00000000);
289 nv_icmd(dev, 0x0008ef, 0x00000000);
290 nv_icmd(dev, 0x0008a0, 0x00000000);
291 nv_icmd(dev, 0x0008a1, 0x00000000);
292 nv_icmd(dev, 0x0008a2, 0x00000000);
293 nv_icmd(dev, 0x0008a3, 0x00000000);
294 nv_icmd(dev, 0x0008a4, 0x00000000);
295 nv_icmd(dev, 0x0008a5, 0x00000000);
296 nv_icmd(dev, 0x0008a6, 0x00000000);
297 nv_icmd(dev, 0x0008a7, 0x00000000);
298 nv_icmd(dev, 0x0008a8, 0x00000000);
299 nv_icmd(dev, 0x0008a9, 0x00000000);
300 nv_icmd(dev, 0x0008aa, 0x00000000);
301 nv_icmd(dev, 0x0008ab, 0x00000000);
302 nv_icmd(dev, 0x0008ac, 0x00000000);
303 nv_icmd(dev, 0x0008ad, 0x00000000);
304 nv_icmd(dev, 0x0008ae, 0x00000000);
305 nv_icmd(dev, 0x0008af, 0x00000000);
306 nv_icmd(dev, 0x0008f0, 0x00000000);
307 nv_icmd(dev, 0x0008f1, 0x00000000);
308 nv_icmd(dev, 0x0008f2, 0x00000000);
309 nv_icmd(dev, 0x0008f3, 0x00000000);
310 nv_icmd(dev, 0x0008f4, 0x00000000);
311 nv_icmd(dev, 0x0008f5, 0x00000000);
312 nv_icmd(dev, 0x0008f6, 0x00000000);
313 nv_icmd(dev, 0x0008f7, 0x00000000);
314 nv_icmd(dev, 0x0008f8, 0x00000000);
315 nv_icmd(dev, 0x0008f9, 0x00000000);
316 nv_icmd(dev, 0x0008fa, 0x00000000);
317 nv_icmd(dev, 0x0008fb, 0x00000000);
318 nv_icmd(dev, 0x0008fc, 0x00000000);
319 nv_icmd(dev, 0x0008fd, 0x00000000);
320 nv_icmd(dev, 0x0008fe, 0x00000000);
321 nv_icmd(dev, 0x0008ff, 0x00000000);
322 nv_icmd(dev, 0x00094c, 0x000000ff);
323 nv_icmd(dev, 0x00094d, 0xffffffff);
324 nv_icmd(dev, 0x00094e, 0x00000002);
325 nv_icmd(dev, 0x0002ec, 0x00000001);
326 nv_icmd(dev, 0x000303, 0x00000001);
327 nv_icmd(dev, 0x0002e6, 0x00000001);
328 nv_icmd(dev, 0x000466, 0x00000052);
329 nv_icmd(dev, 0x000301, 0x3f800000);
330 nv_icmd(dev, 0x000304, 0x30201000);
331 nv_icmd(dev, 0x000305, 0x70605040);
332 nv_icmd(dev, 0x000306, 0xb8a89888);
333 nv_icmd(dev, 0x000307, 0xf8e8d8c8);
334 nv_icmd(dev, 0x00030a, 0x00ffff00);
335 nv_icmd(dev, 0x00030b, 0x0000001a);
336 nv_icmd(dev, 0x00030c, 0x00000001);
337 nv_icmd(dev, 0x000318, 0x00000001);
338 nv_icmd(dev, 0x000340, 0x00000000);
339 nv_icmd(dev, 0x000375, 0x00000001);
340 nv_icmd(dev, 0x00037d, 0x00000006);
341 nv_icmd(dev, 0x0003a0, 0x00000002);
342 nv_icmd(dev, 0x0003aa, 0x00000001);
343 nv_icmd(dev, 0x0003a9, 0x00000001);
344 nv_icmd(dev, 0x000380, 0x00000001);
345 nv_icmd(dev, 0x000383, 0x00000011);
346 nv_icmd(dev, 0x000360, 0x00000040);
347 nv_icmd(dev, 0x000366, 0x00000000);
348 nv_icmd(dev, 0x000367, 0x00000000);
349 nv_icmd(dev, 0x000368, 0x00000fff);
350 nv_icmd(dev, 0x000370, 0x00000000);
351 nv_icmd(dev, 0x000371, 0x00000000);
352 nv_icmd(dev, 0x000372, 0x000fffff);
353 nv_icmd(dev, 0x00037a, 0x00000012);
354 nv_icmd(dev, 0x000619, 0x00000003);
355 nv_icmd(dev, 0x000811, 0x00000003);
356 nv_icmd(dev, 0x000812, 0x00000004);
357 nv_icmd(dev, 0x000813, 0x00000006);
358 nv_icmd(dev, 0x000814, 0x00000008);
359 nv_icmd(dev, 0x000815, 0x0000000b);
360 nv_icmd(dev, 0x000800, 0x00000001);
361 nv_icmd(dev, 0x000801, 0x00000001);
362 nv_icmd(dev, 0x000802, 0x00000001);
363 nv_icmd(dev, 0x000803, 0x00000001);
364 nv_icmd(dev, 0x000804, 0x00000001);
365 nv_icmd(dev, 0x000805, 0x00000001);
366 nv_icmd(dev, 0x000632, 0x00000001);
367 nv_icmd(dev, 0x000633, 0x00000002);
368 nv_icmd(dev, 0x000634, 0x00000003);
369 nv_icmd(dev, 0x000635, 0x00000004);
370 nv_icmd(dev, 0x000654, 0x3f800000);
371 nv_icmd(dev, 0x000657, 0x3f800000);
372 nv_icmd(dev, 0x000655, 0x3f800000);
373 nv_icmd(dev, 0x000656, 0x3f800000);
374 nv_icmd(dev, 0x0006cd, 0x3f800000);
375 nv_icmd(dev, 0x0007f5, 0x3f800000);
376 nv_icmd(dev, 0x0007dc, 0x39291909);
377 nv_icmd(dev, 0x0007dd, 0x79695949);
378 nv_icmd(dev, 0x0007de, 0xb9a99989);
379 nv_icmd(dev, 0x0007df, 0xf9e9d9c9);
380 nv_icmd(dev, 0x0007e8, 0x00003210);
381 nv_icmd(dev, 0x0007e9, 0x00007654);
382 nv_icmd(dev, 0x0007ea, 0x00000098);
383 nv_icmd(dev, 0x0007ec, 0x39291909);
384 nv_icmd(dev, 0x0007ed, 0x79695949);
385 nv_icmd(dev, 0x0007ee, 0xb9a99989);
386 nv_icmd(dev, 0x0007ef, 0xf9e9d9c9);
387 nv_icmd(dev, 0x0007f0, 0x00003210);
388 nv_icmd(dev, 0x0007f1, 0x00007654);
389 nv_icmd(dev, 0x0007f2, 0x00000098);
390 nv_icmd(dev, 0x0005a5, 0x00000001);
391 nv_icmd(dev, 0x000980, 0x00000000);
392 nv_icmd(dev, 0x000981, 0x00000000);
393 nv_icmd(dev, 0x000982, 0x00000000);
394 nv_icmd(dev, 0x000983, 0x00000000);
395 nv_icmd(dev, 0x000984, 0x00000000);
396 nv_icmd(dev, 0x000985, 0x00000000);
397 nv_icmd(dev, 0x000986, 0x00000000);
398 nv_icmd(dev, 0x000987, 0x00000000);
399 nv_icmd(dev, 0x000988, 0x00000000);
400 nv_icmd(dev, 0x000989, 0x00000000);
401 nv_icmd(dev, 0x00098a, 0x00000000);
402 nv_icmd(dev, 0x00098b, 0x00000000);
403 nv_icmd(dev, 0x00098c, 0x00000000);
404 nv_icmd(dev, 0x00098d, 0x00000000);
405 nv_icmd(dev, 0x00098e, 0x00000000);
406 nv_icmd(dev, 0x00098f, 0x00000000);
407 nv_icmd(dev, 0x000990, 0x00000000);
408 nv_icmd(dev, 0x000991, 0x00000000);
409 nv_icmd(dev, 0x000992, 0x00000000);
410 nv_icmd(dev, 0x000993, 0x00000000);
411 nv_icmd(dev, 0x000994, 0x00000000);
412 nv_icmd(dev, 0x000995, 0x00000000);
413 nv_icmd(dev, 0x000996, 0x00000000);
414 nv_icmd(dev, 0x000997, 0x00000000);
415 nv_icmd(dev, 0x000998, 0x00000000);
416 nv_icmd(dev, 0x000999, 0x00000000);
417 nv_icmd(dev, 0x00099a, 0x00000000);
418 nv_icmd(dev, 0x00099b, 0x00000000);
419 nv_icmd(dev, 0x00099c, 0x00000000);
420 nv_icmd(dev, 0x00099d, 0x00000000);
421 nv_icmd(dev, 0x00099e, 0x00000000);
422 nv_icmd(dev, 0x00099f, 0x00000000);
423 nv_icmd(dev, 0x0009a0, 0x00000000);
424 nv_icmd(dev, 0x0009a1, 0x00000000);
425 nv_icmd(dev, 0x0009a2, 0x00000000);
426 nv_icmd(dev, 0x0009a3, 0x00000000);
427 nv_icmd(dev, 0x0009a4, 0x00000000);
428 nv_icmd(dev, 0x0009a5, 0x00000000);
429 nv_icmd(dev, 0x0009a6, 0x00000000);
430 nv_icmd(dev, 0x0009a7, 0x00000000);
431 nv_icmd(dev, 0x0009a8, 0x00000000);
432 nv_icmd(dev, 0x0009a9, 0x00000000);
433 nv_icmd(dev, 0x0009aa, 0x00000000);
434 nv_icmd(dev, 0x0009ab, 0x00000000);
435 nv_icmd(dev, 0x0009ac, 0x00000000);
436 nv_icmd(dev, 0x0009ad, 0x00000000);
437 nv_icmd(dev, 0x0009ae, 0x00000000);
438 nv_icmd(dev, 0x0009af, 0x00000000);
439 nv_icmd(dev, 0x0009b0, 0x00000000);
440 nv_icmd(dev, 0x0009b1, 0x00000000);
441 nv_icmd(dev, 0x0009b2, 0x00000000);
442 nv_icmd(dev, 0x0009b3, 0x00000000);
443 nv_icmd(dev, 0x0009b4, 0x00000000);
444 nv_icmd(dev, 0x0009b5, 0x00000000);
445 nv_icmd(dev, 0x0009b6, 0x00000000);
446 nv_icmd(dev, 0x0009b7, 0x00000000);
447 nv_icmd(dev, 0x0009b8, 0x00000000);
448 nv_icmd(dev, 0x0009b9, 0x00000000);
449 nv_icmd(dev, 0x0009ba, 0x00000000);
450 nv_icmd(dev, 0x0009bb, 0x00000000);
451 nv_icmd(dev, 0x0009bc, 0x00000000);
452 nv_icmd(dev, 0x0009bd, 0x00000000);
453 nv_icmd(dev, 0x0009be, 0x00000000);
454 nv_icmd(dev, 0x0009bf, 0x00000000);
455 nv_icmd(dev, 0x0009c0, 0x00000000);
456 nv_icmd(dev, 0x0009c1, 0x00000000);
457 nv_icmd(dev, 0x0009c2, 0x00000000);
458 nv_icmd(dev, 0x0009c3, 0x00000000);
459 nv_icmd(dev, 0x0009c4, 0x00000000);
460 nv_icmd(dev, 0x0009c5, 0x00000000);
461 nv_icmd(dev, 0x0009c6, 0x00000000);
462 nv_icmd(dev, 0x0009c7, 0x00000000);
463 nv_icmd(dev, 0x0009c8, 0x00000000);
464 nv_icmd(dev, 0x0009c9, 0x00000000);
465 nv_icmd(dev, 0x0009ca, 0x00000000);
466 nv_icmd(dev, 0x0009cb, 0x00000000);
467 nv_icmd(dev, 0x0009cc, 0x00000000);
468 nv_icmd(dev, 0x0009cd, 0x00000000);
469 nv_icmd(dev, 0x0009ce, 0x00000000);
470 nv_icmd(dev, 0x0009cf, 0x00000000);
471 nv_icmd(dev, 0x0009d0, 0x00000000);
472 nv_icmd(dev, 0x0009d1, 0x00000000);
473 nv_icmd(dev, 0x0009d2, 0x00000000);
474 nv_icmd(dev, 0x0009d3, 0x00000000);
475 nv_icmd(dev, 0x0009d4, 0x00000000);
476 nv_icmd(dev, 0x0009d5, 0x00000000);
477 nv_icmd(dev, 0x0009d6, 0x00000000);
478 nv_icmd(dev, 0x0009d7, 0x00000000);
479 nv_icmd(dev, 0x0009d8, 0x00000000);
480 nv_icmd(dev, 0x0009d9, 0x00000000);
481 nv_icmd(dev, 0x0009da, 0x00000000);
482 nv_icmd(dev, 0x0009db, 0x00000000);
483 nv_icmd(dev, 0x0009dc, 0x00000000);
484 nv_icmd(dev, 0x0009dd, 0x00000000);
485 nv_icmd(dev, 0x0009de, 0x00000000);
486 nv_icmd(dev, 0x0009df, 0x00000000);
487 nv_icmd(dev, 0x0009e0, 0x00000000);
488 nv_icmd(dev, 0x0009e1, 0x00000000);
489 nv_icmd(dev, 0x0009e2, 0x00000000);
490 nv_icmd(dev, 0x0009e3, 0x00000000);
491 nv_icmd(dev, 0x0009e4, 0x00000000);
492 nv_icmd(dev, 0x0009e5, 0x00000000);
493 nv_icmd(dev, 0x0009e6, 0x00000000);
494 nv_icmd(dev, 0x0009e7, 0x00000000);
495 nv_icmd(dev, 0x0009e8, 0x00000000);
496 nv_icmd(dev, 0x0009e9, 0x00000000);
497 nv_icmd(dev, 0x0009ea, 0x00000000);
498 nv_icmd(dev, 0x0009eb, 0x00000000);
499 nv_icmd(dev, 0x0009ec, 0x00000000);
500 nv_icmd(dev, 0x0009ed, 0x00000000);
501 nv_icmd(dev, 0x0009ee, 0x00000000);
502 nv_icmd(dev, 0x0009ef, 0x00000000);
503 nv_icmd(dev, 0x0009f0, 0x00000000);
504 nv_icmd(dev, 0x0009f1, 0x00000000);
505 nv_icmd(dev, 0x0009f2, 0x00000000);
506 nv_icmd(dev, 0x0009f3, 0x00000000);
507 nv_icmd(dev, 0x0009f4, 0x00000000);
508 nv_icmd(dev, 0x0009f5, 0x00000000);
509 nv_icmd(dev, 0x0009f6, 0x00000000);
510 nv_icmd(dev, 0x0009f7, 0x00000000);
511 nv_icmd(dev, 0x0009f8, 0x00000000);
512 nv_icmd(dev, 0x0009f9, 0x00000000);
513 nv_icmd(dev, 0x0009fa, 0x00000000);
514 nv_icmd(dev, 0x0009fb, 0x00000000);
515 nv_icmd(dev, 0x0009fc, 0x00000000);
516 nv_icmd(dev, 0x0009fd, 0x00000000);
517 nv_icmd(dev, 0x0009fe, 0x00000000);
518 nv_icmd(dev, 0x0009ff, 0x00000000);
519 nv_icmd(dev, 0x000468, 0x00000004);
520 nv_icmd(dev, 0x00046c, 0x00000001);
521 nv_icmd(dev, 0x000470, 0x00000000);
522 nv_icmd(dev, 0x000471, 0x00000000);
523 nv_icmd(dev, 0x000472, 0x00000000);
524 nv_icmd(dev, 0x000473, 0x00000000);
525 nv_icmd(dev, 0x000474, 0x00000000);
526 nv_icmd(dev, 0x000475, 0x00000000);
527 nv_icmd(dev, 0x000476, 0x00000000);
528 nv_icmd(dev, 0x000477, 0x00000000);
529 nv_icmd(dev, 0x000478, 0x00000000);
530 nv_icmd(dev, 0x000479, 0x00000000);
531 nv_icmd(dev, 0x00047a, 0x00000000);
532 nv_icmd(dev, 0x00047b, 0x00000000);
533 nv_icmd(dev, 0x00047c, 0x00000000);
534 nv_icmd(dev, 0x00047d, 0x00000000);
535 nv_icmd(dev, 0x00047e, 0x00000000);
536 nv_icmd(dev, 0x00047f, 0x00000000);
537 nv_icmd(dev, 0x000480, 0x00000000);
538 nv_icmd(dev, 0x000481, 0x00000000);
539 nv_icmd(dev, 0x000482, 0x00000000);
540 nv_icmd(dev, 0x000483, 0x00000000);
541 nv_icmd(dev, 0x000484, 0x00000000);
542 nv_icmd(dev, 0x000485, 0x00000000);
543 nv_icmd(dev, 0x000486, 0x00000000);
544 nv_icmd(dev, 0x000487, 0x00000000);
545 nv_icmd(dev, 0x000488, 0x00000000);
546 nv_icmd(dev, 0x000489, 0x00000000);
547 nv_icmd(dev, 0x00048a, 0x00000000);
548 nv_icmd(dev, 0x00048b, 0x00000000);
549 nv_icmd(dev, 0x00048c, 0x00000000);
550 nv_icmd(dev, 0x00048d, 0x00000000);
551 nv_icmd(dev, 0x00048e, 0x00000000);
552 nv_icmd(dev, 0x00048f, 0x00000000);
553 nv_icmd(dev, 0x000490, 0x00000000);
554 nv_icmd(dev, 0x000491, 0x00000000);
555 nv_icmd(dev, 0x000492, 0x00000000);
556 nv_icmd(dev, 0x000493, 0x00000000);
557 nv_icmd(dev, 0x000494, 0x00000000);
558 nv_icmd(dev, 0x000495, 0x00000000);
559 nv_icmd(dev, 0x000496, 0x00000000);
560 nv_icmd(dev, 0x000497, 0x00000000);
561 nv_icmd(dev, 0x000498, 0x00000000);
562 nv_icmd(dev, 0x000499, 0x00000000);
563 nv_icmd(dev, 0x00049a, 0x00000000);
564 nv_icmd(dev, 0x00049b, 0x00000000);
565 nv_icmd(dev, 0x00049c, 0x00000000);
566 nv_icmd(dev, 0x00049d, 0x00000000);
567 nv_icmd(dev, 0x00049e, 0x00000000);
568 nv_icmd(dev, 0x00049f, 0x00000000);
569 nv_icmd(dev, 0x0004a0, 0x00000000);
570 nv_icmd(dev, 0x0004a1, 0x00000000);
571 nv_icmd(dev, 0x0004a2, 0x00000000);
572 nv_icmd(dev, 0x0004a3, 0x00000000);
573 nv_icmd(dev, 0x0004a4, 0x00000000);
574 nv_icmd(dev, 0x0004a5, 0x00000000);
575 nv_icmd(dev, 0x0004a6, 0x00000000);
576 nv_icmd(dev, 0x0004a7, 0x00000000);
577 nv_icmd(dev, 0x0004a8, 0x00000000);
578 nv_icmd(dev, 0x0004a9, 0x00000000);
579 nv_icmd(dev, 0x0004aa, 0x00000000);
580 nv_icmd(dev, 0x0004ab, 0x00000000);
581 nv_icmd(dev, 0x0004ac, 0x00000000);
582 nv_icmd(dev, 0x0004ad, 0x00000000);
583 nv_icmd(dev, 0x0004ae, 0x00000000);
584 nv_icmd(dev, 0x0004af, 0x00000000);
585 nv_icmd(dev, 0x0004b0, 0x00000000);
586 nv_icmd(dev, 0x0004b1, 0x00000000);
587 nv_icmd(dev, 0x0004b2, 0x00000000);
588 nv_icmd(dev, 0x0004b3, 0x00000000);
589 nv_icmd(dev, 0x0004b4, 0x00000000);
590 nv_icmd(dev, 0x0004b5, 0x00000000);
591 nv_icmd(dev, 0x0004b6, 0x00000000);
592 nv_icmd(dev, 0x0004b7, 0x00000000);
593 nv_icmd(dev, 0x0004b8, 0x00000000);
594 nv_icmd(dev, 0x0004b9, 0x00000000);
595 nv_icmd(dev, 0x0004ba, 0x00000000);
596 nv_icmd(dev, 0x0004bb, 0x00000000);
597 nv_icmd(dev, 0x0004bc, 0x00000000);
598 nv_icmd(dev, 0x0004bd, 0x00000000);
599 nv_icmd(dev, 0x0004be, 0x00000000);
600 nv_icmd(dev, 0x0004bf, 0x00000000);
601 nv_icmd(dev, 0x0004c0, 0x00000000);
602 nv_icmd(dev, 0x0004c1, 0x00000000);
603 nv_icmd(dev, 0x0004c2, 0x00000000);
604 nv_icmd(dev, 0x0004c3, 0x00000000);
605 nv_icmd(dev, 0x0004c4, 0x00000000);
606 nv_icmd(dev, 0x0004c5, 0x00000000);
607 nv_icmd(dev, 0x0004c6, 0x00000000);
608 nv_icmd(dev, 0x0004c7, 0x00000000);
609 nv_icmd(dev, 0x0004c8, 0x00000000);
610 nv_icmd(dev, 0x0004c9, 0x00000000);
611 nv_icmd(dev, 0x0004ca, 0x00000000);
612 nv_icmd(dev, 0x0004cb, 0x00000000);
613 nv_icmd(dev, 0x0004cc, 0x00000000);
614 nv_icmd(dev, 0x0004cd, 0x00000000);
615 nv_icmd(dev, 0x0004ce, 0x00000000);
616 nv_icmd(dev, 0x0004cf, 0x00000000);
617 nv_icmd(dev, 0x000510, 0x3f800000);
618 nv_icmd(dev, 0x000511, 0x3f800000);
619 nv_icmd(dev, 0x000512, 0x3f800000);
620 nv_icmd(dev, 0x000513, 0x3f800000);
621 nv_icmd(dev, 0x000514, 0x3f800000);
622 nv_icmd(dev, 0x000515, 0x3f800000);
623 nv_icmd(dev, 0x000516, 0x3f800000);
624 nv_icmd(dev, 0x000517, 0x3f800000);
625 nv_icmd(dev, 0x000518, 0x3f800000);
626 nv_icmd(dev, 0x000519, 0x3f800000);
627 nv_icmd(dev, 0x00051a, 0x3f800000);
628 nv_icmd(dev, 0x00051b, 0x3f800000);
629 nv_icmd(dev, 0x00051c, 0x3f800000);
630 nv_icmd(dev, 0x00051d, 0x3f800000);
631 nv_icmd(dev, 0x00051e, 0x3f800000);
632 nv_icmd(dev, 0x00051f, 0x3f800000);
633 nv_icmd(dev, 0x000520, 0x000002b6);
634 nv_icmd(dev, 0x000529, 0x00000001);
635 nv_icmd(dev, 0x000530, 0xffff0000);
636 nv_icmd(dev, 0x000531, 0xffff0000);
637 nv_icmd(dev, 0x000532, 0xffff0000);
638 nv_icmd(dev, 0x000533, 0xffff0000);
639 nv_icmd(dev, 0x000534, 0xffff0000);
640 nv_icmd(dev, 0x000535, 0xffff0000);
641 nv_icmd(dev, 0x000536, 0xffff0000);
642 nv_icmd(dev, 0x000537, 0xffff0000);
643 nv_icmd(dev, 0x000538, 0xffff0000);
644 nv_icmd(dev, 0x000539, 0xffff0000);
645 nv_icmd(dev, 0x00053a, 0xffff0000);
646 nv_icmd(dev, 0x00053b, 0xffff0000);
647 nv_icmd(dev, 0x00053c, 0xffff0000);
648 nv_icmd(dev, 0x00053d, 0xffff0000);
649 nv_icmd(dev, 0x00053e, 0xffff0000);
650 nv_icmd(dev, 0x00053f, 0xffff0000);
651 nv_icmd(dev, 0x000585, 0x0000003f);
652 nv_icmd(dev, 0x000576, 0x00000003);
653 nv_icmd(dev, 0x00057b, 0x00000059);
654 nv_icmd(dev, 0x000586, 0x00000040);
655 nv_icmd(dev, 0x000582, 0x00000080);
656 nv_icmd(dev, 0x000583, 0x00000080);
657 nv_icmd(dev, 0x0005c2, 0x00000001);
658 nv_icmd(dev, 0x000638, 0x00000001);
659 nv_icmd(dev, 0x000639, 0x00000001);
660 nv_icmd(dev, 0x00063a, 0x00000002);
661 nv_icmd(dev, 0x00063b, 0x00000001);
662 nv_icmd(dev, 0x00063c, 0x00000001);
663 nv_icmd(dev, 0x00063d, 0x00000002);
664 nv_icmd(dev, 0x00063e, 0x00000001);
665 nv_icmd(dev, 0x0008b8, 0x00000001);
666 nv_icmd(dev, 0x0008b9, 0x00000001);
667 nv_icmd(dev, 0x0008ba, 0x00000001);
668 nv_icmd(dev, 0x0008bb, 0x00000001);
669 nv_icmd(dev, 0x0008bc, 0x00000001);
670 nv_icmd(dev, 0x0008bd, 0x00000001);
671 nv_icmd(dev, 0x0008be, 0x00000001);
672 nv_icmd(dev, 0x0008bf, 0x00000001);
673 nv_icmd(dev, 0x000900, 0x00000001);
674 nv_icmd(dev, 0x000901, 0x00000001);
675 nv_icmd(dev, 0x000902, 0x00000001);
676 nv_icmd(dev, 0x000903, 0x00000001);
677 nv_icmd(dev, 0x000904, 0x00000001);
678 nv_icmd(dev, 0x000905, 0x00000001);
679 nv_icmd(dev, 0x000906, 0x00000001);
680 nv_icmd(dev, 0x000907, 0x00000001);
681 nv_icmd(dev, 0x000908, 0x00000002);
682 nv_icmd(dev, 0x000909, 0x00000002);
683 nv_icmd(dev, 0x00090a, 0x00000002);
684 nv_icmd(dev, 0x00090b, 0x00000002);
685 nv_icmd(dev, 0x00090c, 0x00000002);
686 nv_icmd(dev, 0x00090d, 0x00000002);
687 nv_icmd(dev, 0x00090e, 0x00000002);
688 nv_icmd(dev, 0x00090f, 0x00000002);
689 nv_icmd(dev, 0x000910, 0x00000001);
690 nv_icmd(dev, 0x000911, 0x00000001);
691 nv_icmd(dev, 0x000912, 0x00000001);
692 nv_icmd(dev, 0x000913, 0x00000001);
693 nv_icmd(dev, 0x000914, 0x00000001);
694 nv_icmd(dev, 0x000915, 0x00000001);
695 nv_icmd(dev, 0x000916, 0x00000001);
696 nv_icmd(dev, 0x000917, 0x00000001);
697 nv_icmd(dev, 0x000918, 0x00000001);
698 nv_icmd(dev, 0x000919, 0x00000001);
699 nv_icmd(dev, 0x00091a, 0x00000001);
700 nv_icmd(dev, 0x00091b, 0x00000001);
701 nv_icmd(dev, 0x00091c, 0x00000001);
702 nv_icmd(dev, 0x00091d, 0x00000001);
703 nv_icmd(dev, 0x00091e, 0x00000001);
704 nv_icmd(dev, 0x00091f, 0x00000001);
705 nv_icmd(dev, 0x000920, 0x00000002);
706 nv_icmd(dev, 0x000921, 0x00000002);
707 nv_icmd(dev, 0x000922, 0x00000002);
708 nv_icmd(dev, 0x000923, 0x00000002);
709 nv_icmd(dev, 0x000924, 0x00000002);
710 nv_icmd(dev, 0x000925, 0x00000002);
711 nv_icmd(dev, 0x000926, 0x00000002);
712 nv_icmd(dev, 0x000927, 0x00000002);
713 nv_icmd(dev, 0x000928, 0x00000001);
714 nv_icmd(dev, 0x000929, 0x00000001);
715 nv_icmd(dev, 0x00092a, 0x00000001);
716 nv_icmd(dev, 0x00092b, 0x00000001);
717 nv_icmd(dev, 0x00092c, 0x00000001);
718 nv_icmd(dev, 0x00092d, 0x00000001);
719 nv_icmd(dev, 0x00092e, 0x00000001);
720 nv_icmd(dev, 0x00092f, 0x00000001);
721 nv_icmd(dev, 0x000648, 0x00000001);
722 nv_icmd(dev, 0x000649, 0x00000001);
723 nv_icmd(dev, 0x00064a, 0x00000001);
724 nv_icmd(dev, 0x00064b, 0x00000001);
725 nv_icmd(dev, 0x00064c, 0x00000001);
726 nv_icmd(dev, 0x00064d, 0x00000001);
727 nv_icmd(dev, 0x00064e, 0x00000001);
728 nv_icmd(dev, 0x00064f, 0x00000001);
729 nv_icmd(dev, 0x000650, 0x00000001);
730 nv_icmd(dev, 0x000658, 0x0000000f);
731 nv_icmd(dev, 0x0007ff, 0x0000000a);
732 nv_icmd(dev, 0x00066a, 0x40000000);
733 nv_icmd(dev, 0x00066b, 0x10000000);
734 nv_icmd(dev, 0x00066c, 0xffff0000);
735 nv_icmd(dev, 0x00066d, 0xffff0000);
736 nv_icmd(dev, 0x0007af, 0x00000008);
737 nv_icmd(dev, 0x0007b0, 0x00000008);
738 nv_icmd(dev, 0x0007f6, 0x00000001);
739 nv_icmd(dev, 0x0006b2, 0x00000055);
740 nv_icmd(dev, 0x0007ad, 0x00000003);
741 nv_icmd(dev, 0x000937, 0x00000001);
742 nv_icmd(dev, 0x000971, 0x00000008);
743 nv_icmd(dev, 0x000972, 0x00000040);
744 nv_icmd(dev, 0x000973, 0x0000012c);
745 nv_icmd(dev, 0x00097c, 0x00000040);
746 nv_icmd(dev, 0x000979, 0x00000003);
747 nv_icmd(dev, 0x000975, 0x00000020);
748 nv_icmd(dev, 0x000976, 0x00000001);
749 nv_icmd(dev, 0x000977, 0x00000020);
750 nv_icmd(dev, 0x000978, 0x00000001);
751 nv_icmd(dev, 0x000957, 0x00000003);
752 nv_icmd(dev, 0x00095e, 0x20164010);
753 nv_icmd(dev, 0x00095f, 0x00000020);
754 nv_icmd(dev, 0x00097d, 0x00000020);
755 nv_icmd(dev, 0x000683, 0x00000006);
756 nv_icmd(dev, 0x000685, 0x003fffff);
757 nv_icmd(dev, 0x000687, 0x003fffff);
758 nv_icmd(dev, 0x0006a0, 0x00000005);
759 nv_icmd(dev, 0x000840, 0x00400008);
760 nv_icmd(dev, 0x000841, 0x08000080);
761 nv_icmd(dev, 0x000842, 0x00400008);
762 nv_icmd(dev, 0x000843, 0x08000080);
763 nv_icmd(dev, 0x000818, 0x00000000);
764 nv_icmd(dev, 0x000819, 0x00000000);
765 nv_icmd(dev, 0x00081a, 0x00000000);
766 nv_icmd(dev, 0x00081b, 0x00000000);
767 nv_icmd(dev, 0x00081c, 0x00000000);
768 nv_icmd(dev, 0x00081d, 0x00000000);
769 nv_icmd(dev, 0x00081e, 0x00000000);
770 nv_icmd(dev, 0x00081f, 0x00000000);
771 nv_icmd(dev, 0x000848, 0x00000000);
772 nv_icmd(dev, 0x000849, 0x00000000);
773 nv_icmd(dev, 0x00084a, 0x00000000);
774 nv_icmd(dev, 0x00084b, 0x00000000);
775 nv_icmd(dev, 0x00084c, 0x00000000);
776 nv_icmd(dev, 0x00084d, 0x00000000);
777 nv_icmd(dev, 0x00084e, 0x00000000);
778 nv_icmd(dev, 0x00084f, 0x00000000);
779 nv_icmd(dev, 0x000850, 0x00000000);
780 nv_icmd(dev, 0x000851, 0x00000000);
781 nv_icmd(dev, 0x000852, 0x00000000);
782 nv_icmd(dev, 0x000853, 0x00000000);
783 nv_icmd(dev, 0x000854, 0x00000000);
784 nv_icmd(dev, 0x000855, 0x00000000);
785 nv_icmd(dev, 0x000856, 0x00000000);
786 nv_icmd(dev, 0x000857, 0x00000000);
787 nv_icmd(dev, 0x000738, 0x00000000);
788 nv_icmd(dev, 0x0006aa, 0x00000001);
789 nv_icmd(dev, 0x0006ab, 0x00000002);
790 nv_icmd(dev, 0x0006ac, 0x00000080);
791 nv_icmd(dev, 0x0006ad, 0x00000100);
792 nv_icmd(dev, 0x0006ae, 0x00000100);
793 nv_icmd(dev, 0x0006b1, 0x00000011);
794 nv_icmd(dev, 0x0006bb, 0x000000cf);
795 nv_icmd(dev, 0x0006ce, 0x2a712488);
796 nv_icmd(dev, 0x000739, 0x4085c000);
797 nv_icmd(dev, 0x00073a, 0x00000080);
798 nv_icmd(dev, 0x000786, 0x80000100);
799 nv_icmd(dev, 0x00073c, 0x00010100);
800 nv_icmd(dev, 0x00073d, 0x02800000);
801 nv_icmd(dev, 0x000787, 0x000000cf);
802 nv_icmd(dev, 0x00078c, 0x00000008);
803 nv_icmd(dev, 0x000792, 0x00000001);
804 nv_icmd(dev, 0x000794, 0x00000001);
805 nv_icmd(dev, 0x000795, 0x00000001);
806 nv_icmd(dev, 0x000796, 0x00000001);
807 nv_icmd(dev, 0x000797, 0x000000cf);
808 nv_icmd(dev, 0x000836, 0x00000001);
809 nv_icmd(dev, 0x00079a, 0x00000002);
810 nv_icmd(dev, 0x000833, 0x04444480);
811 nv_icmd(dev, 0x0007a1, 0x00000001);
812 nv_icmd(dev, 0x0007a3, 0x00000001);
813 nv_icmd(dev, 0x0007a4, 0x00000001);
814 nv_icmd(dev, 0x0007a5, 0x00000001);
815 nv_icmd(dev, 0x000831, 0x00000004);
816 nv_icmd(dev, 0x000b07, 0x00000002);
817 nv_icmd(dev, 0x000b08, 0x00000100);
818 nv_icmd(dev, 0x000b09, 0x00000100);
819 nv_icmd(dev, 0x000b0a, 0x00000001);
820 nv_icmd(dev, 0x000a04, 0x000000ff);
821 nv_icmd(dev, 0x000a0b, 0x00000040);
822 nv_icmd(dev, 0x00097f, 0x00000100);
823 nv_icmd(dev, 0x000a02, 0x00000001);
824 nv_icmd(dev, 0x000809, 0x00000007);
825 nv_icmd(dev, 0x00c221, 0x00000040);
826 nv_icmd(dev, 0x00c1b0, 0x0000000f);
827 nv_icmd(dev, 0x00c1b1, 0x0000000f);
828 nv_icmd(dev, 0x00c1b2, 0x0000000f);
829 nv_icmd(dev, 0x00c1b3, 0x0000000f);
830 nv_icmd(dev, 0x00c1b4, 0x0000000f);
831 nv_icmd(dev, 0x00c1b5, 0x0000000f);
832 nv_icmd(dev, 0x00c1b6, 0x0000000f);
833 nv_icmd(dev, 0x00c1b7, 0x0000000f);
834 nv_icmd(dev, 0x00c1b8, 0x0fac6881);
835 nv_icmd(dev, 0x00c1b9, 0x00fac688);
836 nv_icmd(dev, 0x00c401, 0x00000001);
837 nv_icmd(dev, 0x00c402, 0x00010001);
838 nv_icmd(dev, 0x00c403, 0x00000001);
839 nv_icmd(dev, 0x00c404, 0x00000001);
840 nv_icmd(dev, 0x00c40e, 0x00000020);
841 nv_icmd(dev, 0x00c500, 0x00000003);
842 nv_icmd(dev, 0x01e100, 0x00000001);
843 nv_icmd(dev, 0x001000, 0x00000002);
844 nv_icmd(dev, 0x0006aa, 0x00000001);
845 nv_icmd(dev, 0x0006ad, 0x00000100);
846 nv_icmd(dev, 0x0006ae, 0x00000100);
847 nv_icmd(dev, 0x0006b1, 0x00000011);
848 nv_icmd(dev, 0x00078c, 0x00000008);
849 nv_icmd(dev, 0x000792, 0x00000001);
850 nv_icmd(dev, 0x000794, 0x00000001);
851 nv_icmd(dev, 0x000795, 0x00000001);
852 nv_icmd(dev, 0x000796, 0x00000001);
853 nv_icmd(dev, 0x000797, 0x000000cf);
854 nv_icmd(dev, 0x00079a, 0x00000002);
855 nv_icmd(dev, 0x000833, 0x04444480);
856 nv_icmd(dev, 0x0007a1, 0x00000001);
857 nv_icmd(dev, 0x0007a3, 0x00000001);
858 nv_icmd(dev, 0x0007a4, 0x00000001);
859 nv_icmd(dev, 0x0007a5, 0x00000001);
860 nv_icmd(dev, 0x000831, 0x00000004);
861 nv_icmd(dev, 0x01e100, 0x00000001);
862 nv_icmd(dev, 0x001000, 0x00000008);
863 nv_icmd(dev, 0x000039, 0x00000000);
864 nv_icmd(dev, 0x00003a, 0x00000000);
865 nv_icmd(dev, 0x00003b, 0x00000000);
866 nv_icmd(dev, 0x000380, 0x00000001);
867 nv_icmd(dev, 0x000366, 0x00000000);
868 nv_icmd(dev, 0x000367, 0x00000000);
869 nv_icmd(dev, 0x000368, 0x00000fff);
870 nv_icmd(dev, 0x000370, 0x00000000);
871 nv_icmd(dev, 0x000371, 0x00000000);
872 nv_icmd(dev, 0x000372, 0x000fffff);
873 nv_icmd(dev, 0x000813, 0x00000006);
874 nv_icmd(dev, 0x000814, 0x00000008);
875 nv_icmd(dev, 0x000957, 0x00000003);
876 nv_icmd(dev, 0x000818, 0x00000000);
877 nv_icmd(dev, 0x000819, 0x00000000);
878 nv_icmd(dev, 0x00081a, 0x00000000);
879 nv_icmd(dev, 0x00081b, 0x00000000);
880 nv_icmd(dev, 0x00081c, 0x00000000);
881 nv_icmd(dev, 0x00081d, 0x00000000);
882 nv_icmd(dev, 0x00081e, 0x00000000);
883 nv_icmd(dev, 0x00081f, 0x00000000);
884 nv_icmd(dev, 0x000848, 0x00000000);
885 nv_icmd(dev, 0x000849, 0x00000000);
886 nv_icmd(dev, 0x00084a, 0x00000000);
887 nv_icmd(dev, 0x00084b, 0x00000000);
888 nv_icmd(dev, 0x00084c, 0x00000000);
889 nv_icmd(dev, 0x00084d, 0x00000000);
890 nv_icmd(dev, 0x00084e, 0x00000000);
891 nv_icmd(dev, 0x00084f, 0x00000000);
892 nv_icmd(dev, 0x000850, 0x00000000);
893 nv_icmd(dev, 0x000851, 0x00000000);
894 nv_icmd(dev, 0x000852, 0x00000000);
895 nv_icmd(dev, 0x000853, 0x00000000);
896 nv_icmd(dev, 0x000854, 0x00000000);
897 nv_icmd(dev, 0x000855, 0x00000000);
898 nv_icmd(dev, 0x000856, 0x00000000);
899 nv_icmd(dev, 0x000857, 0x00000000);
900 nv_icmd(dev, 0x000738, 0x00000000);
901 nv_icmd(dev, 0x000b07, 0x00000002);
902 nv_icmd(dev, 0x000b08, 0x00000100);
903 nv_icmd(dev, 0x000b09, 0x00000100);
904 nv_icmd(dev, 0x000b0a, 0x00000001);
905 nv_icmd(dev, 0x000a04, 0x000000ff);
906 nv_icmd(dev, 0x00097f, 0x00000100);
907 nv_icmd(dev, 0x000a02, 0x00000001);
908 nv_icmd(dev, 0x000809, 0x00000007);
909 nv_icmd(dev, 0x00c221, 0x00000040);
910 nv_icmd(dev, 0x00c401, 0x00000001);
911 nv_icmd(dev, 0x00c402, 0x00010001);
912 nv_icmd(dev, 0x00c403, 0x00000001);
913 nv_icmd(dev, 0x00c404, 0x00000001);
914 nv_icmd(dev, 0x00c40e, 0x00000020);
915 nv_icmd(dev, 0x00c500, 0x00000003);
916 nv_icmd(dev, 0x01e100, 0x00000001);
917 nv_icmd(dev, 0x001000, 0x00000001);
918 nv_icmd(dev, 0x000b07, 0x00000002);
919 nv_icmd(dev, 0x000b08, 0x00000100);
920 nv_icmd(dev, 0x000b09, 0x00000100);
921 nv_icmd(dev, 0x000b0a, 0x00000001);
922 nv_icmd(dev, 0x01e100, 0x00000001);
923 nv_wr32(dev, 0x400208, 0x00000000);
924}
925
926 static void
/*
 * Submit a single graph-class method to the hardware during context
 * generation, by poking the PGRAPH front-end method-injection registers
 * directly (rather than going through a channel's pushbuffer).
 *
 * dev:   DRM device whose MMIO space is written.
 * class: object class the method belongs to (e.g. 0xa097 in the callers
 *        visible below).
 * mthd:  method offset within that class.
 * data:  32-bit argument for the method.
 *
 * The data register (0x40448c) must be loaded before the trigger write
 * to 0x404488 — the writes are order-dependent; do not reorder.
 * NOTE(review): bit 31 of the 0x404488 write presumably acts as the
 * "fire"/valid bit, with the method packed at bits 14+ and the class in
 * the low bits — register semantics come from reverse-engineered hw
 * docs, not from this file; confirm against envytools/rnndb.
 */
927 nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
928 {
929 nv_wr32(dev, 0x40448c, data);
930 nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
931 }
932
933static void
934nve0_grctx_generate_a097(struct drm_device *dev)
935{
936 nv_mthd(dev, 0xa097, 0x0800, 0x00000000);
937 nv_mthd(dev, 0xa097, 0x0840, 0x00000000);
938 nv_mthd(dev, 0xa097, 0x0880, 0x00000000);
939 nv_mthd(dev, 0xa097, 0x08c0, 0x00000000);
940 nv_mthd(dev, 0xa097, 0x0900, 0x00000000);
941 nv_mthd(dev, 0xa097, 0x0940, 0x00000000);
942 nv_mthd(dev, 0xa097, 0x0980, 0x00000000);
943 nv_mthd(dev, 0xa097, 0x09c0, 0x00000000);
944 nv_mthd(dev, 0xa097, 0x0804, 0x00000000);
945 nv_mthd(dev, 0xa097, 0x0844, 0x00000000);
946 nv_mthd(dev, 0xa097, 0x0884, 0x00000000);
947 nv_mthd(dev, 0xa097, 0x08c4, 0x00000000);
948 nv_mthd(dev, 0xa097, 0x0904, 0x00000000);
949 nv_mthd(dev, 0xa097, 0x0944, 0x00000000);
950 nv_mthd(dev, 0xa097, 0x0984, 0x00000000);
951 nv_mthd(dev, 0xa097, 0x09c4, 0x00000000);
952 nv_mthd(dev, 0xa097, 0x0808, 0x00000400);
953 nv_mthd(dev, 0xa097, 0x0848, 0x00000400);
954 nv_mthd(dev, 0xa097, 0x0888, 0x00000400);
955 nv_mthd(dev, 0xa097, 0x08c8, 0x00000400);
956 nv_mthd(dev, 0xa097, 0x0908, 0x00000400);
957 nv_mthd(dev, 0xa097, 0x0948, 0x00000400);
958 nv_mthd(dev, 0xa097, 0x0988, 0x00000400);
959 nv_mthd(dev, 0xa097, 0x09c8, 0x00000400);
960 nv_mthd(dev, 0xa097, 0x080c, 0x00000300);
961 nv_mthd(dev, 0xa097, 0x084c, 0x00000300);
962 nv_mthd(dev, 0xa097, 0x088c, 0x00000300);
963 nv_mthd(dev, 0xa097, 0x08cc, 0x00000300);
964 nv_mthd(dev, 0xa097, 0x090c, 0x00000300);
965 nv_mthd(dev, 0xa097, 0x094c, 0x00000300);
966 nv_mthd(dev, 0xa097, 0x098c, 0x00000300);
967 nv_mthd(dev, 0xa097, 0x09cc, 0x00000300);
968 nv_mthd(dev, 0xa097, 0x0810, 0x000000cf);
969 nv_mthd(dev, 0xa097, 0x0850, 0x00000000);
970 nv_mthd(dev, 0xa097, 0x0890, 0x00000000);
971 nv_mthd(dev, 0xa097, 0x08d0, 0x00000000);
972 nv_mthd(dev, 0xa097, 0x0910, 0x00000000);
973 nv_mthd(dev, 0xa097, 0x0950, 0x00000000);
974 nv_mthd(dev, 0xa097, 0x0990, 0x00000000);
975 nv_mthd(dev, 0xa097, 0x09d0, 0x00000000);
976 nv_mthd(dev, 0xa097, 0x0814, 0x00000040);
977 nv_mthd(dev, 0xa097, 0x0854, 0x00000040);
978 nv_mthd(dev, 0xa097, 0x0894, 0x00000040);
979 nv_mthd(dev, 0xa097, 0x08d4, 0x00000040);
980 nv_mthd(dev, 0xa097, 0x0914, 0x00000040);
981 nv_mthd(dev, 0xa097, 0x0954, 0x00000040);
982 nv_mthd(dev, 0xa097, 0x0994, 0x00000040);
983 nv_mthd(dev, 0xa097, 0x09d4, 0x00000040);
984 nv_mthd(dev, 0xa097, 0x0818, 0x00000001);
985 nv_mthd(dev, 0xa097, 0x0858, 0x00000001);
986 nv_mthd(dev, 0xa097, 0x0898, 0x00000001);
987 nv_mthd(dev, 0xa097, 0x08d8, 0x00000001);
988 nv_mthd(dev, 0xa097, 0x0918, 0x00000001);
989 nv_mthd(dev, 0xa097, 0x0958, 0x00000001);
990 nv_mthd(dev, 0xa097, 0x0998, 0x00000001);
991 nv_mthd(dev, 0xa097, 0x09d8, 0x00000001);
992 nv_mthd(dev, 0xa097, 0x081c, 0x00000000);
993 nv_mthd(dev, 0xa097, 0x085c, 0x00000000);
994 nv_mthd(dev, 0xa097, 0x089c, 0x00000000);
995 nv_mthd(dev, 0xa097, 0x08dc, 0x00000000);
996 nv_mthd(dev, 0xa097, 0x091c, 0x00000000);
997 nv_mthd(dev, 0xa097, 0x095c, 0x00000000);
998 nv_mthd(dev, 0xa097, 0x099c, 0x00000000);
999 nv_mthd(dev, 0xa097, 0x09dc, 0x00000000);
1000 nv_mthd(dev, 0xa097, 0x0820, 0x00000000);
1001 nv_mthd(dev, 0xa097, 0x0860, 0x00000000);
1002 nv_mthd(dev, 0xa097, 0x08a0, 0x00000000);
1003 nv_mthd(dev, 0xa097, 0x08e0, 0x00000000);
1004 nv_mthd(dev, 0xa097, 0x0920, 0x00000000);
1005 nv_mthd(dev, 0xa097, 0x0960, 0x00000000);
1006 nv_mthd(dev, 0xa097, 0x09a0, 0x00000000);
1007 nv_mthd(dev, 0xa097, 0x09e0, 0x00000000);
1008 nv_mthd(dev, 0xa097, 0x1c00, 0x00000000);
1009 nv_mthd(dev, 0xa097, 0x1c10, 0x00000000);
1010 nv_mthd(dev, 0xa097, 0x1c20, 0x00000000);
1011 nv_mthd(dev, 0xa097, 0x1c30, 0x00000000);
1012 nv_mthd(dev, 0xa097, 0x1c40, 0x00000000);
1013 nv_mthd(dev, 0xa097, 0x1c50, 0x00000000);
1014 nv_mthd(dev, 0xa097, 0x1c60, 0x00000000);
1015 nv_mthd(dev, 0xa097, 0x1c70, 0x00000000);
1016 nv_mthd(dev, 0xa097, 0x1c80, 0x00000000);
1017 nv_mthd(dev, 0xa097, 0x1c90, 0x00000000);
1018 nv_mthd(dev, 0xa097, 0x1ca0, 0x00000000);
1019 nv_mthd(dev, 0xa097, 0x1cb0, 0x00000000);
1020 nv_mthd(dev, 0xa097, 0x1cc0, 0x00000000);
1021 nv_mthd(dev, 0xa097, 0x1cd0, 0x00000000);
1022 nv_mthd(dev, 0xa097, 0x1ce0, 0x00000000);
1023 nv_mthd(dev, 0xa097, 0x1cf0, 0x00000000);
1024 nv_mthd(dev, 0xa097, 0x1c04, 0x00000000);
1025 nv_mthd(dev, 0xa097, 0x1c14, 0x00000000);
1026 nv_mthd(dev, 0xa097, 0x1c24, 0x00000000);
1027 nv_mthd(dev, 0xa097, 0x1c34, 0x00000000);
1028 nv_mthd(dev, 0xa097, 0x1c44, 0x00000000);
1029 nv_mthd(dev, 0xa097, 0x1c54, 0x00000000);
1030 nv_mthd(dev, 0xa097, 0x1c64, 0x00000000);
1031 nv_mthd(dev, 0xa097, 0x1c74, 0x00000000);
1032 nv_mthd(dev, 0xa097, 0x1c84, 0x00000000);
1033 nv_mthd(dev, 0xa097, 0x1c94, 0x00000000);
1034 nv_mthd(dev, 0xa097, 0x1ca4, 0x00000000);
1035 nv_mthd(dev, 0xa097, 0x1cb4, 0x00000000);
1036 nv_mthd(dev, 0xa097, 0x1cc4, 0x00000000);
1037 nv_mthd(dev, 0xa097, 0x1cd4, 0x00000000);
1038 nv_mthd(dev, 0xa097, 0x1ce4, 0x00000000);
1039 nv_mthd(dev, 0xa097, 0x1cf4, 0x00000000);
1040 nv_mthd(dev, 0xa097, 0x1c08, 0x00000000);
1041 nv_mthd(dev, 0xa097, 0x1c18, 0x00000000);
1042 nv_mthd(dev, 0xa097, 0x1c28, 0x00000000);
1043 nv_mthd(dev, 0xa097, 0x1c38, 0x00000000);
1044 nv_mthd(dev, 0xa097, 0x1c48, 0x00000000);
1045 nv_mthd(dev, 0xa097, 0x1c58, 0x00000000);
1046 nv_mthd(dev, 0xa097, 0x1c68, 0x00000000);
1047 nv_mthd(dev, 0xa097, 0x1c78, 0x00000000);
1048 nv_mthd(dev, 0xa097, 0x1c88, 0x00000000);
1049 nv_mthd(dev, 0xa097, 0x1c98, 0x00000000);
1050 nv_mthd(dev, 0xa097, 0x1ca8, 0x00000000);
1051 nv_mthd(dev, 0xa097, 0x1cb8, 0x00000000);
1052 nv_mthd(dev, 0xa097, 0x1cc8, 0x00000000);
1053 nv_mthd(dev, 0xa097, 0x1cd8, 0x00000000);
1054 nv_mthd(dev, 0xa097, 0x1ce8, 0x00000000);
1055 nv_mthd(dev, 0xa097, 0x1cf8, 0x00000000);
1056 nv_mthd(dev, 0xa097, 0x1c0c, 0x00000000);
1057 nv_mthd(dev, 0xa097, 0x1c1c, 0x00000000);
1058 nv_mthd(dev, 0xa097, 0x1c2c, 0x00000000);
1059 nv_mthd(dev, 0xa097, 0x1c3c, 0x00000000);
1060 nv_mthd(dev, 0xa097, 0x1c4c, 0x00000000);
1061 nv_mthd(dev, 0xa097, 0x1c5c, 0x00000000);
1062 nv_mthd(dev, 0xa097, 0x1c6c, 0x00000000);
1063 nv_mthd(dev, 0xa097, 0x1c7c, 0x00000000);
1064 nv_mthd(dev, 0xa097, 0x1c8c, 0x00000000);
1065 nv_mthd(dev, 0xa097, 0x1c9c, 0x00000000);
1066 nv_mthd(dev, 0xa097, 0x1cac, 0x00000000);
1067 nv_mthd(dev, 0xa097, 0x1cbc, 0x00000000);
1068 nv_mthd(dev, 0xa097, 0x1ccc, 0x00000000);
1069 nv_mthd(dev, 0xa097, 0x1cdc, 0x00000000);
1070 nv_mthd(dev, 0xa097, 0x1cec, 0x00000000);
1071 nv_mthd(dev, 0xa097, 0x1cfc, 0x00000000);
1072 nv_mthd(dev, 0xa097, 0x1d00, 0x00000000);
1073 nv_mthd(dev, 0xa097, 0x1d10, 0x00000000);
1074 nv_mthd(dev, 0xa097, 0x1d20, 0x00000000);
1075 nv_mthd(dev, 0xa097, 0x1d30, 0x00000000);
1076 nv_mthd(dev, 0xa097, 0x1d40, 0x00000000);
1077 nv_mthd(dev, 0xa097, 0x1d50, 0x00000000);
1078 nv_mthd(dev, 0xa097, 0x1d60, 0x00000000);
1079 nv_mthd(dev, 0xa097, 0x1d70, 0x00000000);
1080 nv_mthd(dev, 0xa097, 0x1d80, 0x00000000);
1081 nv_mthd(dev, 0xa097, 0x1d90, 0x00000000);
1082 nv_mthd(dev, 0xa097, 0x1da0, 0x00000000);
1083 nv_mthd(dev, 0xa097, 0x1db0, 0x00000000);
1084 nv_mthd(dev, 0xa097, 0x1dc0, 0x00000000);
1085 nv_mthd(dev, 0xa097, 0x1dd0, 0x00000000);
1086 nv_mthd(dev, 0xa097, 0x1de0, 0x00000000);
1087 nv_mthd(dev, 0xa097, 0x1df0, 0x00000000);
1088 nv_mthd(dev, 0xa097, 0x1d04, 0x00000000);
1089 nv_mthd(dev, 0xa097, 0x1d14, 0x00000000);
1090 nv_mthd(dev, 0xa097, 0x1d24, 0x00000000);
1091 nv_mthd(dev, 0xa097, 0x1d34, 0x00000000);
1092 nv_mthd(dev, 0xa097, 0x1d44, 0x00000000);
1093 nv_mthd(dev, 0xa097, 0x1d54, 0x00000000);
1094 nv_mthd(dev, 0xa097, 0x1d64, 0x00000000);
1095 nv_mthd(dev, 0xa097, 0x1d74, 0x00000000);
1096 nv_mthd(dev, 0xa097, 0x1d84, 0x00000000);
1097 nv_mthd(dev, 0xa097, 0x1d94, 0x00000000);
1098 nv_mthd(dev, 0xa097, 0x1da4, 0x00000000);
1099 nv_mthd(dev, 0xa097, 0x1db4, 0x00000000);
1100 nv_mthd(dev, 0xa097, 0x1dc4, 0x00000000);
1101 nv_mthd(dev, 0xa097, 0x1dd4, 0x00000000);
1102 nv_mthd(dev, 0xa097, 0x1de4, 0x00000000);
1103 nv_mthd(dev, 0xa097, 0x1df4, 0x00000000);
1104 nv_mthd(dev, 0xa097, 0x1d08, 0x00000000);
1105 nv_mthd(dev, 0xa097, 0x1d18, 0x00000000);
1106 nv_mthd(dev, 0xa097, 0x1d28, 0x00000000);
1107 nv_mthd(dev, 0xa097, 0x1d38, 0x00000000);
1108 nv_mthd(dev, 0xa097, 0x1d48, 0x00000000);
1109 nv_mthd(dev, 0xa097, 0x1d58, 0x00000000);
1110 nv_mthd(dev, 0xa097, 0x1d68, 0x00000000);
1111 nv_mthd(dev, 0xa097, 0x1d78, 0x00000000);
1112 nv_mthd(dev, 0xa097, 0x1d88, 0x00000000);
1113 nv_mthd(dev, 0xa097, 0x1d98, 0x00000000);
1114 nv_mthd(dev, 0xa097, 0x1da8, 0x00000000);
1115 nv_mthd(dev, 0xa097, 0x1db8, 0x00000000);
1116 nv_mthd(dev, 0xa097, 0x1dc8, 0x00000000);
1117 nv_mthd(dev, 0xa097, 0x1dd8, 0x00000000);
1118 nv_mthd(dev, 0xa097, 0x1de8, 0x00000000);
1119 nv_mthd(dev, 0xa097, 0x1df8, 0x00000000);
1120 nv_mthd(dev, 0xa097, 0x1d0c, 0x00000000);
1121 nv_mthd(dev, 0xa097, 0x1d1c, 0x00000000);
1122 nv_mthd(dev, 0xa097, 0x1d2c, 0x00000000);
1123 nv_mthd(dev, 0xa097, 0x1d3c, 0x00000000);
1124 nv_mthd(dev, 0xa097, 0x1d4c, 0x00000000);
1125 nv_mthd(dev, 0xa097, 0x1d5c, 0x00000000);
1126 nv_mthd(dev, 0xa097, 0x1d6c, 0x00000000);
1127 nv_mthd(dev, 0xa097, 0x1d7c, 0x00000000);
1128 nv_mthd(dev, 0xa097, 0x1d8c, 0x00000000);
1129 nv_mthd(dev, 0xa097, 0x1d9c, 0x00000000);
1130 nv_mthd(dev, 0xa097, 0x1dac, 0x00000000);
1131 nv_mthd(dev, 0xa097, 0x1dbc, 0x00000000);
1132 nv_mthd(dev, 0xa097, 0x1dcc, 0x00000000);
1133 nv_mthd(dev, 0xa097, 0x1ddc, 0x00000000);
1134 nv_mthd(dev, 0xa097, 0x1dec, 0x00000000);
1135 nv_mthd(dev, 0xa097, 0x1dfc, 0x00000000);
1136 nv_mthd(dev, 0xa097, 0x1f00, 0x00000000);
1137 nv_mthd(dev, 0xa097, 0x1f08, 0x00000000);
1138 nv_mthd(dev, 0xa097, 0x1f10, 0x00000000);
1139 nv_mthd(dev, 0xa097, 0x1f18, 0x00000000);
1140 nv_mthd(dev, 0xa097, 0x1f20, 0x00000000);
1141 nv_mthd(dev, 0xa097, 0x1f28, 0x00000000);
1142 nv_mthd(dev, 0xa097, 0x1f30, 0x00000000);
1143 nv_mthd(dev, 0xa097, 0x1f38, 0x00000000);
1144 nv_mthd(dev, 0xa097, 0x1f40, 0x00000000);
1145 nv_mthd(dev, 0xa097, 0x1f48, 0x00000000);
1146 nv_mthd(dev, 0xa097, 0x1f50, 0x00000000);
1147 nv_mthd(dev, 0xa097, 0x1f58, 0x00000000);
1148 nv_mthd(dev, 0xa097, 0x1f60, 0x00000000);
1149 nv_mthd(dev, 0xa097, 0x1f68, 0x00000000);
1150 nv_mthd(dev, 0xa097, 0x1f70, 0x00000000);
1151 nv_mthd(dev, 0xa097, 0x1f78, 0x00000000);
1152 nv_mthd(dev, 0xa097, 0x1f04, 0x00000000);
1153 nv_mthd(dev, 0xa097, 0x1f0c, 0x00000000);
1154 nv_mthd(dev, 0xa097, 0x1f14, 0x00000000);
1155 nv_mthd(dev, 0xa097, 0x1f1c, 0x00000000);
1156 nv_mthd(dev, 0xa097, 0x1f24, 0x00000000);
1157 nv_mthd(dev, 0xa097, 0x1f2c, 0x00000000);
1158 nv_mthd(dev, 0xa097, 0x1f34, 0x00000000);
1159 nv_mthd(dev, 0xa097, 0x1f3c, 0x00000000);
1160 nv_mthd(dev, 0xa097, 0x1f44, 0x00000000);
1161 nv_mthd(dev, 0xa097, 0x1f4c, 0x00000000);
1162 nv_mthd(dev, 0xa097, 0x1f54, 0x00000000);
1163 nv_mthd(dev, 0xa097, 0x1f5c, 0x00000000);
1164 nv_mthd(dev, 0xa097, 0x1f64, 0x00000000);
1165 nv_mthd(dev, 0xa097, 0x1f6c, 0x00000000);
1166 nv_mthd(dev, 0xa097, 0x1f74, 0x00000000);
1167 nv_mthd(dev, 0xa097, 0x1f7c, 0x00000000);
1168 nv_mthd(dev, 0xa097, 0x1f80, 0x00000000);
1169 nv_mthd(dev, 0xa097, 0x1f88, 0x00000000);
1170 nv_mthd(dev, 0xa097, 0x1f90, 0x00000000);
1171 nv_mthd(dev, 0xa097, 0x1f98, 0x00000000);
1172 nv_mthd(dev, 0xa097, 0x1fa0, 0x00000000);
1173 nv_mthd(dev, 0xa097, 0x1fa8, 0x00000000);
1174 nv_mthd(dev, 0xa097, 0x1fb0, 0x00000000);
1175 nv_mthd(dev, 0xa097, 0x1fb8, 0x00000000);
1176 nv_mthd(dev, 0xa097, 0x1fc0, 0x00000000);
1177 nv_mthd(dev, 0xa097, 0x1fc8, 0x00000000);
1178 nv_mthd(dev, 0xa097, 0x1fd0, 0x00000000);
1179 nv_mthd(dev, 0xa097, 0x1fd8, 0x00000000);
1180 nv_mthd(dev, 0xa097, 0x1fe0, 0x00000000);
1181 nv_mthd(dev, 0xa097, 0x1fe8, 0x00000000);
1182 nv_mthd(dev, 0xa097, 0x1ff0, 0x00000000);
1183 nv_mthd(dev, 0xa097, 0x1ff8, 0x00000000);
1184 nv_mthd(dev, 0xa097, 0x1f84, 0x00000000);
1185 nv_mthd(dev, 0xa097, 0x1f8c, 0x00000000);
1186 nv_mthd(dev, 0xa097, 0x1f94, 0x00000000);
1187 nv_mthd(dev, 0xa097, 0x1f9c, 0x00000000);
1188 nv_mthd(dev, 0xa097, 0x1fa4, 0x00000000);
1189 nv_mthd(dev, 0xa097, 0x1fac, 0x00000000);
1190 nv_mthd(dev, 0xa097, 0x1fb4, 0x00000000);
1191 nv_mthd(dev, 0xa097, 0x1fbc, 0x00000000);
1192 nv_mthd(dev, 0xa097, 0x1fc4, 0x00000000);
1193 nv_mthd(dev, 0xa097, 0x1fcc, 0x00000000);
1194 nv_mthd(dev, 0xa097, 0x1fd4, 0x00000000);
1195 nv_mthd(dev, 0xa097, 0x1fdc, 0x00000000);
1196 nv_mthd(dev, 0xa097, 0x1fe4, 0x00000000);
1197 nv_mthd(dev, 0xa097, 0x1fec, 0x00000000);
1198 nv_mthd(dev, 0xa097, 0x1ff4, 0x00000000);
1199 nv_mthd(dev, 0xa097, 0x1ffc, 0x00000000);
1200 nv_mthd(dev, 0xa097, 0x2000, 0x00000000);
1201 nv_mthd(dev, 0xa097, 0x2040, 0x00000011);
1202 nv_mthd(dev, 0xa097, 0x2080, 0x00000020);
1203 nv_mthd(dev, 0xa097, 0x20c0, 0x00000030);
1204 nv_mthd(dev, 0xa097, 0x2100, 0x00000040);
1205 nv_mthd(dev, 0xa097, 0x2140, 0x00000051);
1206 nv_mthd(dev, 0xa097, 0x200c, 0x00000001);
1207 nv_mthd(dev, 0xa097, 0x204c, 0x00000001);
1208 nv_mthd(dev, 0xa097, 0x208c, 0x00000001);
1209 nv_mthd(dev, 0xa097, 0x20cc, 0x00000001);
1210 nv_mthd(dev, 0xa097, 0x210c, 0x00000001);
1211 nv_mthd(dev, 0xa097, 0x214c, 0x00000001);
1212 nv_mthd(dev, 0xa097, 0x2010, 0x00000000);
1213 nv_mthd(dev, 0xa097, 0x2050, 0x00000000);
1214 nv_mthd(dev, 0xa097, 0x2090, 0x00000001);
1215 nv_mthd(dev, 0xa097, 0x20d0, 0x00000002);
1216 nv_mthd(dev, 0xa097, 0x2110, 0x00000003);
1217 nv_mthd(dev, 0xa097, 0x2150, 0x00000004);
1218 nv_mthd(dev, 0xa097, 0x0380, 0x00000000);
1219 nv_mthd(dev, 0xa097, 0x03a0, 0x00000000);
1220 nv_mthd(dev, 0xa097, 0x03c0, 0x00000000);
1221 nv_mthd(dev, 0xa097, 0x03e0, 0x00000000);
1222 nv_mthd(dev, 0xa097, 0x0384, 0x00000000);
1223 nv_mthd(dev, 0xa097, 0x03a4, 0x00000000);
1224 nv_mthd(dev, 0xa097, 0x03c4, 0x00000000);
1225 nv_mthd(dev, 0xa097, 0x03e4, 0x00000000);
1226 nv_mthd(dev, 0xa097, 0x0388, 0x00000000);
1227 nv_mthd(dev, 0xa097, 0x03a8, 0x00000000);
1228 nv_mthd(dev, 0xa097, 0x03c8, 0x00000000);
1229 nv_mthd(dev, 0xa097, 0x03e8, 0x00000000);
1230 nv_mthd(dev, 0xa097, 0x038c, 0x00000000);
1231 nv_mthd(dev, 0xa097, 0x03ac, 0x00000000);
1232 nv_mthd(dev, 0xa097, 0x03cc, 0x00000000);
1233 nv_mthd(dev, 0xa097, 0x03ec, 0x00000000);
1234 nv_mthd(dev, 0xa097, 0x0700, 0x00000000);
1235 nv_mthd(dev, 0xa097, 0x0710, 0x00000000);
1236 nv_mthd(dev, 0xa097, 0x0720, 0x00000000);
1237 nv_mthd(dev, 0xa097, 0x0730, 0x00000000);
1238 nv_mthd(dev, 0xa097, 0x0704, 0x00000000);
1239 nv_mthd(dev, 0xa097, 0x0714, 0x00000000);
1240 nv_mthd(dev, 0xa097, 0x0724, 0x00000000);
1241 nv_mthd(dev, 0xa097, 0x0734, 0x00000000);
1242 nv_mthd(dev, 0xa097, 0x0708, 0x00000000);
1243 nv_mthd(dev, 0xa097, 0x0718, 0x00000000);
1244 nv_mthd(dev, 0xa097, 0x0728, 0x00000000);
1245 nv_mthd(dev, 0xa097, 0x0738, 0x00000000);
1246 nv_mthd(dev, 0xa097, 0x2800, 0x00000000);
1247 nv_mthd(dev, 0xa097, 0x2804, 0x00000000);
1248 nv_mthd(dev, 0xa097, 0x2808, 0x00000000);
1249 nv_mthd(dev, 0xa097, 0x280c, 0x00000000);
1250 nv_mthd(dev, 0xa097, 0x2810, 0x00000000);
1251 nv_mthd(dev, 0xa097, 0x2814, 0x00000000);
1252 nv_mthd(dev, 0xa097, 0x2818, 0x00000000);
1253 nv_mthd(dev, 0xa097, 0x281c, 0x00000000);
1254 nv_mthd(dev, 0xa097, 0x2820, 0x00000000);
1255 nv_mthd(dev, 0xa097, 0x2824, 0x00000000);
1256 nv_mthd(dev, 0xa097, 0x2828, 0x00000000);
1257 nv_mthd(dev, 0xa097, 0x282c, 0x00000000);
1258 nv_mthd(dev, 0xa097, 0x2830, 0x00000000);
1259 nv_mthd(dev, 0xa097, 0x2834, 0x00000000);
1260 nv_mthd(dev, 0xa097, 0x2838, 0x00000000);
1261 nv_mthd(dev, 0xa097, 0x283c, 0x00000000);
1262 nv_mthd(dev, 0xa097, 0x2840, 0x00000000);
1263 nv_mthd(dev, 0xa097, 0x2844, 0x00000000);
1264 nv_mthd(dev, 0xa097, 0x2848, 0x00000000);
1265 nv_mthd(dev, 0xa097, 0x284c, 0x00000000);
1266 nv_mthd(dev, 0xa097, 0x2850, 0x00000000);
1267 nv_mthd(dev, 0xa097, 0x2854, 0x00000000);
1268 nv_mthd(dev, 0xa097, 0x2858, 0x00000000);
1269 nv_mthd(dev, 0xa097, 0x285c, 0x00000000);
1270 nv_mthd(dev, 0xa097, 0x2860, 0x00000000);
1271 nv_mthd(dev, 0xa097, 0x2864, 0x00000000);
1272 nv_mthd(dev, 0xa097, 0x2868, 0x00000000);
1273 nv_mthd(dev, 0xa097, 0x286c, 0x00000000);
1274 nv_mthd(dev, 0xa097, 0x2870, 0x00000000);
1275 nv_mthd(dev, 0xa097, 0x2874, 0x00000000);
1276 nv_mthd(dev, 0xa097, 0x2878, 0x00000000);
1277 nv_mthd(dev, 0xa097, 0x287c, 0x00000000);
1278 nv_mthd(dev, 0xa097, 0x2880, 0x00000000);
1279 nv_mthd(dev, 0xa097, 0x2884, 0x00000000);
1280 nv_mthd(dev, 0xa097, 0x2888, 0x00000000);
1281 nv_mthd(dev, 0xa097, 0x288c, 0x00000000);
1282 nv_mthd(dev, 0xa097, 0x2890, 0x00000000);
1283 nv_mthd(dev, 0xa097, 0x2894, 0x00000000);
1284 nv_mthd(dev, 0xa097, 0x2898, 0x00000000);
1285 nv_mthd(dev, 0xa097, 0x289c, 0x00000000);
1286 nv_mthd(dev, 0xa097, 0x28a0, 0x00000000);
1287 nv_mthd(dev, 0xa097, 0x28a4, 0x00000000);
1288 nv_mthd(dev, 0xa097, 0x28a8, 0x00000000);
1289 nv_mthd(dev, 0xa097, 0x28ac, 0x00000000);
1290 nv_mthd(dev, 0xa097, 0x28b0, 0x00000000);
1291 nv_mthd(dev, 0xa097, 0x28b4, 0x00000000);
1292 nv_mthd(dev, 0xa097, 0x28b8, 0x00000000);
1293 nv_mthd(dev, 0xa097, 0x28bc, 0x00000000);
1294 nv_mthd(dev, 0xa097, 0x28c0, 0x00000000);
1295 nv_mthd(dev, 0xa097, 0x28c4, 0x00000000);
1296 nv_mthd(dev, 0xa097, 0x28c8, 0x00000000);
1297 nv_mthd(dev, 0xa097, 0x28cc, 0x00000000);
1298 nv_mthd(dev, 0xa097, 0x28d0, 0x00000000);
1299 nv_mthd(dev, 0xa097, 0x28d4, 0x00000000);
1300 nv_mthd(dev, 0xa097, 0x28d8, 0x00000000);
1301 nv_mthd(dev, 0xa097, 0x28dc, 0x00000000);
1302 nv_mthd(dev, 0xa097, 0x28e0, 0x00000000);
1303 nv_mthd(dev, 0xa097, 0x28e4, 0x00000000);
1304 nv_mthd(dev, 0xa097, 0x28e8, 0x00000000);
1305 nv_mthd(dev, 0xa097, 0x28ec, 0x00000000);
1306 nv_mthd(dev, 0xa097, 0x28f0, 0x00000000);
1307 nv_mthd(dev, 0xa097, 0x28f4, 0x00000000);
1308 nv_mthd(dev, 0xa097, 0x28f8, 0x00000000);
1309 nv_mthd(dev, 0xa097, 0x28fc, 0x00000000);
1310 nv_mthd(dev, 0xa097, 0x2900, 0x00000000);
1311 nv_mthd(dev, 0xa097, 0x2904, 0x00000000);
1312 nv_mthd(dev, 0xa097, 0x2908, 0x00000000);
1313 nv_mthd(dev, 0xa097, 0x290c, 0x00000000);
1314 nv_mthd(dev, 0xa097, 0x2910, 0x00000000);
1315 nv_mthd(dev, 0xa097, 0x2914, 0x00000000);
1316 nv_mthd(dev, 0xa097, 0x2918, 0x00000000);
1317 nv_mthd(dev, 0xa097, 0x291c, 0x00000000);
1318 nv_mthd(dev, 0xa097, 0x2920, 0x00000000);
1319 nv_mthd(dev, 0xa097, 0x2924, 0x00000000);
1320 nv_mthd(dev, 0xa097, 0x2928, 0x00000000);
1321 nv_mthd(dev, 0xa097, 0x292c, 0x00000000);
1322 nv_mthd(dev, 0xa097, 0x2930, 0x00000000);
1323 nv_mthd(dev, 0xa097, 0x2934, 0x00000000);
1324 nv_mthd(dev, 0xa097, 0x2938, 0x00000000);
1325 nv_mthd(dev, 0xa097, 0x293c, 0x00000000);
1326 nv_mthd(dev, 0xa097, 0x2940, 0x00000000);
1327 nv_mthd(dev, 0xa097, 0x2944, 0x00000000);
1328 nv_mthd(dev, 0xa097, 0x2948, 0x00000000);
1329 nv_mthd(dev, 0xa097, 0x294c, 0x00000000);
1330 nv_mthd(dev, 0xa097, 0x2950, 0x00000000);
1331 nv_mthd(dev, 0xa097, 0x2954, 0x00000000);
1332 nv_mthd(dev, 0xa097, 0x2958, 0x00000000);
1333 nv_mthd(dev, 0xa097, 0x295c, 0x00000000);
1334 nv_mthd(dev, 0xa097, 0x2960, 0x00000000);
1335 nv_mthd(dev, 0xa097, 0x2964, 0x00000000);
1336 nv_mthd(dev, 0xa097, 0x2968, 0x00000000);
1337 nv_mthd(dev, 0xa097, 0x296c, 0x00000000);
1338 nv_mthd(dev, 0xa097, 0x2970, 0x00000000);
1339 nv_mthd(dev, 0xa097, 0x2974, 0x00000000);
1340 nv_mthd(dev, 0xa097, 0x2978, 0x00000000);
1341 nv_mthd(dev, 0xa097, 0x297c, 0x00000000);
1342 nv_mthd(dev, 0xa097, 0x2980, 0x00000000);
1343 nv_mthd(dev, 0xa097, 0x2984, 0x00000000);
1344 nv_mthd(dev, 0xa097, 0x2988, 0x00000000);
1345 nv_mthd(dev, 0xa097, 0x298c, 0x00000000);
1346 nv_mthd(dev, 0xa097, 0x2990, 0x00000000);
1347 nv_mthd(dev, 0xa097, 0x2994, 0x00000000);
1348 nv_mthd(dev, 0xa097, 0x2998, 0x00000000);
1349 nv_mthd(dev, 0xa097, 0x299c, 0x00000000);
1350 nv_mthd(dev, 0xa097, 0x29a0, 0x00000000);
1351 nv_mthd(dev, 0xa097, 0x29a4, 0x00000000);
1352 nv_mthd(dev, 0xa097, 0x29a8, 0x00000000);
1353 nv_mthd(dev, 0xa097, 0x29ac, 0x00000000);
1354 nv_mthd(dev, 0xa097, 0x29b0, 0x00000000);
1355 nv_mthd(dev, 0xa097, 0x29b4, 0x00000000);
1356 nv_mthd(dev, 0xa097, 0x29b8, 0x00000000);
1357 nv_mthd(dev, 0xa097, 0x29bc, 0x00000000);
1358 nv_mthd(dev, 0xa097, 0x29c0, 0x00000000);
1359 nv_mthd(dev, 0xa097, 0x29c4, 0x00000000);
1360 nv_mthd(dev, 0xa097, 0x29c8, 0x00000000);
1361 nv_mthd(dev, 0xa097, 0x29cc, 0x00000000);
1362 nv_mthd(dev, 0xa097, 0x29d0, 0x00000000);
1363 nv_mthd(dev, 0xa097, 0x29d4, 0x00000000);
1364 nv_mthd(dev, 0xa097, 0x29d8, 0x00000000);
1365 nv_mthd(dev, 0xa097, 0x29dc, 0x00000000);
1366 nv_mthd(dev, 0xa097, 0x29e0, 0x00000000);
1367 nv_mthd(dev, 0xa097, 0x29e4, 0x00000000);
1368 nv_mthd(dev, 0xa097, 0x29e8, 0x00000000);
1369 nv_mthd(dev, 0xa097, 0x29ec, 0x00000000);
1370 nv_mthd(dev, 0xa097, 0x29f0, 0x00000000);
1371 nv_mthd(dev, 0xa097, 0x29f4, 0x00000000);
1372 nv_mthd(dev, 0xa097, 0x29f8, 0x00000000);
1373 nv_mthd(dev, 0xa097, 0x29fc, 0x00000000);
1374 nv_mthd(dev, 0xa097, 0x0a00, 0x00000000);
1375 nv_mthd(dev, 0xa097, 0x0a20, 0x00000000);
1376 nv_mthd(dev, 0xa097, 0x0a40, 0x00000000);
1377 nv_mthd(dev, 0xa097, 0x0a60, 0x00000000);
1378 nv_mthd(dev, 0xa097, 0x0a80, 0x00000000);
1379 nv_mthd(dev, 0xa097, 0x0aa0, 0x00000000);
1380 nv_mthd(dev, 0xa097, 0x0ac0, 0x00000000);
1381 nv_mthd(dev, 0xa097, 0x0ae0, 0x00000000);
1382 nv_mthd(dev, 0xa097, 0x0b00, 0x00000000);
1383 nv_mthd(dev, 0xa097, 0x0b20, 0x00000000);
1384 nv_mthd(dev, 0xa097, 0x0b40, 0x00000000);
1385 nv_mthd(dev, 0xa097, 0x0b60, 0x00000000);
1386 nv_mthd(dev, 0xa097, 0x0b80, 0x00000000);
1387 nv_mthd(dev, 0xa097, 0x0ba0, 0x00000000);
1388 nv_mthd(dev, 0xa097, 0x0bc0, 0x00000000);
1389 nv_mthd(dev, 0xa097, 0x0be0, 0x00000000);
1390 nv_mthd(dev, 0xa097, 0x0a04, 0x00000000);
1391 nv_mthd(dev, 0xa097, 0x0a24, 0x00000000);
1392 nv_mthd(dev, 0xa097, 0x0a44, 0x00000000);
1393 nv_mthd(dev, 0xa097, 0x0a64, 0x00000000);
1394 nv_mthd(dev, 0xa097, 0x0a84, 0x00000000);
1395 nv_mthd(dev, 0xa097, 0x0aa4, 0x00000000);
1396 nv_mthd(dev, 0xa097, 0x0ac4, 0x00000000);
1397 nv_mthd(dev, 0xa097, 0x0ae4, 0x00000000);
1398 nv_mthd(dev, 0xa097, 0x0b04, 0x00000000);
1399 nv_mthd(dev, 0xa097, 0x0b24, 0x00000000);
1400 nv_mthd(dev, 0xa097, 0x0b44, 0x00000000);
1401 nv_mthd(dev, 0xa097, 0x0b64, 0x00000000);
1402 nv_mthd(dev, 0xa097, 0x0b84, 0x00000000);
1403 nv_mthd(dev, 0xa097, 0x0ba4, 0x00000000);
1404 nv_mthd(dev, 0xa097, 0x0bc4, 0x00000000);
1405 nv_mthd(dev, 0xa097, 0x0be4, 0x00000000);
1406 nv_mthd(dev, 0xa097, 0x0a08, 0x00000000);
1407 nv_mthd(dev, 0xa097, 0x0a28, 0x00000000);
1408 nv_mthd(dev, 0xa097, 0x0a48, 0x00000000);
1409 nv_mthd(dev, 0xa097, 0x0a68, 0x00000000);
1410 nv_mthd(dev, 0xa097, 0x0a88, 0x00000000);
1411 nv_mthd(dev, 0xa097, 0x0aa8, 0x00000000);
1412 nv_mthd(dev, 0xa097, 0x0ac8, 0x00000000);
1413 nv_mthd(dev, 0xa097, 0x0ae8, 0x00000000);
1414 nv_mthd(dev, 0xa097, 0x0b08, 0x00000000);
1415 nv_mthd(dev, 0xa097, 0x0b28, 0x00000000);
1416 nv_mthd(dev, 0xa097, 0x0b48, 0x00000000);
1417 nv_mthd(dev, 0xa097, 0x0b68, 0x00000000);
1418 nv_mthd(dev, 0xa097, 0x0b88, 0x00000000);
1419 nv_mthd(dev, 0xa097, 0x0ba8, 0x00000000);
1420 nv_mthd(dev, 0xa097, 0x0bc8, 0x00000000);
1421 nv_mthd(dev, 0xa097, 0x0be8, 0x00000000);
1422 nv_mthd(dev, 0xa097, 0x0a0c, 0x00000000);
1423 nv_mthd(dev, 0xa097, 0x0a2c, 0x00000000);
1424 nv_mthd(dev, 0xa097, 0x0a4c, 0x00000000);
1425 nv_mthd(dev, 0xa097, 0x0a6c, 0x00000000);
1426 nv_mthd(dev, 0xa097, 0x0a8c, 0x00000000);
1427 nv_mthd(dev, 0xa097, 0x0aac, 0x00000000);
1428 nv_mthd(dev, 0xa097, 0x0acc, 0x00000000);
1429 nv_mthd(dev, 0xa097, 0x0aec, 0x00000000);
1430 nv_mthd(dev, 0xa097, 0x0b0c, 0x00000000);
1431 nv_mthd(dev, 0xa097, 0x0b2c, 0x00000000);
1432 nv_mthd(dev, 0xa097, 0x0b4c, 0x00000000);
1433 nv_mthd(dev, 0xa097, 0x0b6c, 0x00000000);
1434 nv_mthd(dev, 0xa097, 0x0b8c, 0x00000000);
1435 nv_mthd(dev, 0xa097, 0x0bac, 0x00000000);
1436 nv_mthd(dev, 0xa097, 0x0bcc, 0x00000000);
1437 nv_mthd(dev, 0xa097, 0x0bec, 0x00000000);
1438 nv_mthd(dev, 0xa097, 0x0a10, 0x00000000);
1439 nv_mthd(dev, 0xa097, 0x0a30, 0x00000000);
1440 nv_mthd(dev, 0xa097, 0x0a50, 0x00000000);
1441 nv_mthd(dev, 0xa097, 0x0a70, 0x00000000);
1442 nv_mthd(dev, 0xa097, 0x0a90, 0x00000000);
1443 nv_mthd(dev, 0xa097, 0x0ab0, 0x00000000);
1444 nv_mthd(dev, 0xa097, 0x0ad0, 0x00000000);
1445 nv_mthd(dev, 0xa097, 0x0af0, 0x00000000);
1446 nv_mthd(dev, 0xa097, 0x0b10, 0x00000000);
1447 nv_mthd(dev, 0xa097, 0x0b30, 0x00000000);
1448 nv_mthd(dev, 0xa097, 0x0b50, 0x00000000);
1449 nv_mthd(dev, 0xa097, 0x0b70, 0x00000000);
1450 nv_mthd(dev, 0xa097, 0x0b90, 0x00000000);
1451 nv_mthd(dev, 0xa097, 0x0bb0, 0x00000000);
1452 nv_mthd(dev, 0xa097, 0x0bd0, 0x00000000);
1453 nv_mthd(dev, 0xa097, 0x0bf0, 0x00000000);
1454 nv_mthd(dev, 0xa097, 0x0a14, 0x00000000);
1455 nv_mthd(dev, 0xa097, 0x0a34, 0x00000000);
1456 nv_mthd(dev, 0xa097, 0x0a54, 0x00000000);
1457 nv_mthd(dev, 0xa097, 0x0a74, 0x00000000);
1458 nv_mthd(dev, 0xa097, 0x0a94, 0x00000000);
1459 nv_mthd(dev, 0xa097, 0x0ab4, 0x00000000);
1460 nv_mthd(dev, 0xa097, 0x0ad4, 0x00000000);
1461 nv_mthd(dev, 0xa097, 0x0af4, 0x00000000);
1462 nv_mthd(dev, 0xa097, 0x0b14, 0x00000000);
1463 nv_mthd(dev, 0xa097, 0x0b34, 0x00000000);
1464 nv_mthd(dev, 0xa097, 0x0b54, 0x00000000);
1465 nv_mthd(dev, 0xa097, 0x0b74, 0x00000000);
1466 nv_mthd(dev, 0xa097, 0x0b94, 0x00000000);
1467 nv_mthd(dev, 0xa097, 0x0bb4, 0x00000000);
1468 nv_mthd(dev, 0xa097, 0x0bd4, 0x00000000);
1469 nv_mthd(dev, 0xa097, 0x0bf4, 0x00000000);
1470 nv_mthd(dev, 0xa097, 0x0c00, 0x00000000);
1471 nv_mthd(dev, 0xa097, 0x0c10, 0x00000000);
1472 nv_mthd(dev, 0xa097, 0x0c20, 0x00000000);
1473 nv_mthd(dev, 0xa097, 0x0c30, 0x00000000);
1474 nv_mthd(dev, 0xa097, 0x0c40, 0x00000000);
1475 nv_mthd(dev, 0xa097, 0x0c50, 0x00000000);
1476 nv_mthd(dev, 0xa097, 0x0c60, 0x00000000);
1477 nv_mthd(dev, 0xa097, 0x0c70, 0x00000000);
1478 nv_mthd(dev, 0xa097, 0x0c80, 0x00000000);
1479 nv_mthd(dev, 0xa097, 0x0c90, 0x00000000);
1480 nv_mthd(dev, 0xa097, 0x0ca0, 0x00000000);
1481 nv_mthd(dev, 0xa097, 0x0cb0, 0x00000000);
1482 nv_mthd(dev, 0xa097, 0x0cc0, 0x00000000);
1483 nv_mthd(dev, 0xa097, 0x0cd0, 0x00000000);
1484 nv_mthd(dev, 0xa097, 0x0ce0, 0x00000000);
1485 nv_mthd(dev, 0xa097, 0x0cf0, 0x00000000);
1486 nv_mthd(dev, 0xa097, 0x0c04, 0x00000000);
1487 nv_mthd(dev, 0xa097, 0x0c14, 0x00000000);
1488 nv_mthd(dev, 0xa097, 0x0c24, 0x00000000);
1489 nv_mthd(dev, 0xa097, 0x0c34, 0x00000000);
1490 nv_mthd(dev, 0xa097, 0x0c44, 0x00000000);
1491 nv_mthd(dev, 0xa097, 0x0c54, 0x00000000);
1492 nv_mthd(dev, 0xa097, 0x0c64, 0x00000000);
1493 nv_mthd(dev, 0xa097, 0x0c74, 0x00000000);
1494 nv_mthd(dev, 0xa097, 0x0c84, 0x00000000);
1495 nv_mthd(dev, 0xa097, 0x0c94, 0x00000000);
1496 nv_mthd(dev, 0xa097, 0x0ca4, 0x00000000);
1497 nv_mthd(dev, 0xa097, 0x0cb4, 0x00000000);
1498 nv_mthd(dev, 0xa097, 0x0cc4, 0x00000000);
1499 nv_mthd(dev, 0xa097, 0x0cd4, 0x00000000);
1500 nv_mthd(dev, 0xa097, 0x0ce4, 0x00000000);
1501 nv_mthd(dev, 0xa097, 0x0cf4, 0x00000000);
1502 nv_mthd(dev, 0xa097, 0x0c08, 0x00000000);
1503 nv_mthd(dev, 0xa097, 0x0c18, 0x00000000);
1504 nv_mthd(dev, 0xa097, 0x0c28, 0x00000000);
1505 nv_mthd(dev, 0xa097, 0x0c38, 0x00000000);
1506 nv_mthd(dev, 0xa097, 0x0c48, 0x00000000);
1507 nv_mthd(dev, 0xa097, 0x0c58, 0x00000000);
1508 nv_mthd(dev, 0xa097, 0x0c68, 0x00000000);
1509 nv_mthd(dev, 0xa097, 0x0c78, 0x00000000);
1510 nv_mthd(dev, 0xa097, 0x0c88, 0x00000000);
1511 nv_mthd(dev, 0xa097, 0x0c98, 0x00000000);
1512 nv_mthd(dev, 0xa097, 0x0ca8, 0x00000000);
1513 nv_mthd(dev, 0xa097, 0x0cb8, 0x00000000);
1514 nv_mthd(dev, 0xa097, 0x0cc8, 0x00000000);
1515 nv_mthd(dev, 0xa097, 0x0cd8, 0x00000000);
1516 nv_mthd(dev, 0xa097, 0x0ce8, 0x00000000);
1517 nv_mthd(dev, 0xa097, 0x0cf8, 0x00000000);
1518 nv_mthd(dev, 0xa097, 0x0c0c, 0x3f800000);
1519 nv_mthd(dev, 0xa097, 0x0c1c, 0x3f800000);
1520 nv_mthd(dev, 0xa097, 0x0c2c, 0x3f800000);
1521 nv_mthd(dev, 0xa097, 0x0c3c, 0x3f800000);
1522 nv_mthd(dev, 0xa097, 0x0c4c, 0x3f800000);
1523 nv_mthd(dev, 0xa097, 0x0c5c, 0x3f800000);
1524 nv_mthd(dev, 0xa097, 0x0c6c, 0x3f800000);
1525 nv_mthd(dev, 0xa097, 0x0c7c, 0x3f800000);
1526 nv_mthd(dev, 0xa097, 0x0c8c, 0x3f800000);
1527 nv_mthd(dev, 0xa097, 0x0c9c, 0x3f800000);
1528 nv_mthd(dev, 0xa097, 0x0cac, 0x3f800000);
1529 nv_mthd(dev, 0xa097, 0x0cbc, 0x3f800000);
1530 nv_mthd(dev, 0xa097, 0x0ccc, 0x3f800000);
1531 nv_mthd(dev, 0xa097, 0x0cdc, 0x3f800000);
1532 nv_mthd(dev, 0xa097, 0x0cec, 0x3f800000);
1533 nv_mthd(dev, 0xa097, 0x0cfc, 0x3f800000);
1534 nv_mthd(dev, 0xa097, 0x0d00, 0xffff0000);
1535 nv_mthd(dev, 0xa097, 0x0d08, 0xffff0000);
1536 nv_mthd(dev, 0xa097, 0x0d10, 0xffff0000);
1537 nv_mthd(dev, 0xa097, 0x0d18, 0xffff0000);
1538 nv_mthd(dev, 0xa097, 0x0d20, 0xffff0000);
1539 nv_mthd(dev, 0xa097, 0x0d28, 0xffff0000);
1540 nv_mthd(dev, 0xa097, 0x0d30, 0xffff0000);
1541 nv_mthd(dev, 0xa097, 0x0d38, 0xffff0000);
1542 nv_mthd(dev, 0xa097, 0x0d04, 0xffff0000);
1543 nv_mthd(dev, 0xa097, 0x0d0c, 0xffff0000);
1544 nv_mthd(dev, 0xa097, 0x0d14, 0xffff0000);
1545 nv_mthd(dev, 0xa097, 0x0d1c, 0xffff0000);
1546 nv_mthd(dev, 0xa097, 0x0d24, 0xffff0000);
1547 nv_mthd(dev, 0xa097, 0x0d2c, 0xffff0000);
1548 nv_mthd(dev, 0xa097, 0x0d34, 0xffff0000);
1549 nv_mthd(dev, 0xa097, 0x0d3c, 0xffff0000);
1550 nv_mthd(dev, 0xa097, 0x0e00, 0x00000000);
1551 nv_mthd(dev, 0xa097, 0x0e10, 0x00000000);
1552 nv_mthd(dev, 0xa097, 0x0e20, 0x00000000);
1553 nv_mthd(dev, 0xa097, 0x0e30, 0x00000000);
1554 nv_mthd(dev, 0xa097, 0x0e40, 0x00000000);
1555 nv_mthd(dev, 0xa097, 0x0e50, 0x00000000);
1556 nv_mthd(dev, 0xa097, 0x0e60, 0x00000000);
1557 nv_mthd(dev, 0xa097, 0x0e70, 0x00000000);
1558 nv_mthd(dev, 0xa097, 0x0e80, 0x00000000);
1559 nv_mthd(dev, 0xa097, 0x0e90, 0x00000000);
1560 nv_mthd(dev, 0xa097, 0x0ea0, 0x00000000);
1561 nv_mthd(dev, 0xa097, 0x0eb0, 0x00000000);
1562 nv_mthd(dev, 0xa097, 0x0ec0, 0x00000000);
1563 nv_mthd(dev, 0xa097, 0x0ed0, 0x00000000);
1564 nv_mthd(dev, 0xa097, 0x0ee0, 0x00000000);
1565 nv_mthd(dev, 0xa097, 0x0ef0, 0x00000000);
1566 nv_mthd(dev, 0xa097, 0x0e04, 0xffff0000);
1567 nv_mthd(dev, 0xa097, 0x0e14, 0xffff0000);
1568 nv_mthd(dev, 0xa097, 0x0e24, 0xffff0000);
1569 nv_mthd(dev, 0xa097, 0x0e34, 0xffff0000);
1570 nv_mthd(dev, 0xa097, 0x0e44, 0xffff0000);
1571 nv_mthd(dev, 0xa097, 0x0e54, 0xffff0000);
1572 nv_mthd(dev, 0xa097, 0x0e64, 0xffff0000);
1573 nv_mthd(dev, 0xa097, 0x0e74, 0xffff0000);
1574 nv_mthd(dev, 0xa097, 0x0e84, 0xffff0000);
1575 nv_mthd(dev, 0xa097, 0x0e94, 0xffff0000);
1576 nv_mthd(dev, 0xa097, 0x0ea4, 0xffff0000);
1577 nv_mthd(dev, 0xa097, 0x0eb4, 0xffff0000);
1578 nv_mthd(dev, 0xa097, 0x0ec4, 0xffff0000);
1579 nv_mthd(dev, 0xa097, 0x0ed4, 0xffff0000);
1580 nv_mthd(dev, 0xa097, 0x0ee4, 0xffff0000);
1581 nv_mthd(dev, 0xa097, 0x0ef4, 0xffff0000);
1582 nv_mthd(dev, 0xa097, 0x0e08, 0xffff0000);
1583 nv_mthd(dev, 0xa097, 0x0e18, 0xffff0000);
1584 nv_mthd(dev, 0xa097, 0x0e28, 0xffff0000);
1585 nv_mthd(dev, 0xa097, 0x0e38, 0xffff0000);
1586 nv_mthd(dev, 0xa097, 0x0e48, 0xffff0000);
1587 nv_mthd(dev, 0xa097, 0x0e58, 0xffff0000);
1588 nv_mthd(dev, 0xa097, 0x0e68, 0xffff0000);
1589 nv_mthd(dev, 0xa097, 0x0e78, 0xffff0000);
1590 nv_mthd(dev, 0xa097, 0x0e88, 0xffff0000);
1591 nv_mthd(dev, 0xa097, 0x0e98, 0xffff0000);
1592 nv_mthd(dev, 0xa097, 0x0ea8, 0xffff0000);
1593 nv_mthd(dev, 0xa097, 0x0eb8, 0xffff0000);
1594 nv_mthd(dev, 0xa097, 0x0ec8, 0xffff0000);
1595 nv_mthd(dev, 0xa097, 0x0ed8, 0xffff0000);
1596 nv_mthd(dev, 0xa097, 0x0ee8, 0xffff0000);
1597 nv_mthd(dev, 0xa097, 0x0ef8, 0xffff0000);
1598 nv_mthd(dev, 0xa097, 0x0d40, 0x00000000);
1599 nv_mthd(dev, 0xa097, 0x0d48, 0x00000000);
1600 nv_mthd(dev, 0xa097, 0x0d50, 0x00000000);
1601 nv_mthd(dev, 0xa097, 0x0d58, 0x00000000);
1602 nv_mthd(dev, 0xa097, 0x0d44, 0x00000000);
1603 nv_mthd(dev, 0xa097, 0x0d4c, 0x00000000);
1604 nv_mthd(dev, 0xa097, 0x0d54, 0x00000000);
1605 nv_mthd(dev, 0xa097, 0x0d5c, 0x00000000);
1606 nv_mthd(dev, 0xa097, 0x1e00, 0x00000001);
1607 nv_mthd(dev, 0xa097, 0x1e20, 0x00000001);
1608 nv_mthd(dev, 0xa097, 0x1e40, 0x00000001);
1609 nv_mthd(dev, 0xa097, 0x1e60, 0x00000001);
1610 nv_mthd(dev, 0xa097, 0x1e80, 0x00000001);
1611 nv_mthd(dev, 0xa097, 0x1ea0, 0x00000001);
1612 nv_mthd(dev, 0xa097, 0x1ec0, 0x00000001);
1613 nv_mthd(dev, 0xa097, 0x1ee0, 0x00000001);
1614 nv_mthd(dev, 0xa097, 0x1e04, 0x00000001);
1615 nv_mthd(dev, 0xa097, 0x1e24, 0x00000001);
1616 nv_mthd(dev, 0xa097, 0x1e44, 0x00000001);
1617 nv_mthd(dev, 0xa097, 0x1e64, 0x00000001);
1618 nv_mthd(dev, 0xa097, 0x1e84, 0x00000001);
1619 nv_mthd(dev, 0xa097, 0x1ea4, 0x00000001);
1620 nv_mthd(dev, 0xa097, 0x1ec4, 0x00000001);
1621 nv_mthd(dev, 0xa097, 0x1ee4, 0x00000001);
1622 nv_mthd(dev, 0xa097, 0x1e08, 0x00000002);
1623 nv_mthd(dev, 0xa097, 0x1e28, 0x00000002);
1624 nv_mthd(dev, 0xa097, 0x1e48, 0x00000002);
1625 nv_mthd(dev, 0xa097, 0x1e68, 0x00000002);
1626 nv_mthd(dev, 0xa097, 0x1e88, 0x00000002);
1627 nv_mthd(dev, 0xa097, 0x1ea8, 0x00000002);
1628 nv_mthd(dev, 0xa097, 0x1ec8, 0x00000002);
1629 nv_mthd(dev, 0xa097, 0x1ee8, 0x00000002);
1630 nv_mthd(dev, 0xa097, 0x1e0c, 0x00000001);
1631 nv_mthd(dev, 0xa097, 0x1e2c, 0x00000001);
1632 nv_mthd(dev, 0xa097, 0x1e4c, 0x00000001);
1633 nv_mthd(dev, 0xa097, 0x1e6c, 0x00000001);
1634 nv_mthd(dev, 0xa097, 0x1e8c, 0x00000001);
1635 nv_mthd(dev, 0xa097, 0x1eac, 0x00000001);
1636 nv_mthd(dev, 0xa097, 0x1ecc, 0x00000001);
1637 nv_mthd(dev, 0xa097, 0x1eec, 0x00000001);
1638 nv_mthd(dev, 0xa097, 0x1e10, 0x00000001);
1639 nv_mthd(dev, 0xa097, 0x1e30, 0x00000001);
1640 nv_mthd(dev, 0xa097, 0x1e50, 0x00000001);
1641 nv_mthd(dev, 0xa097, 0x1e70, 0x00000001);
1642 nv_mthd(dev, 0xa097, 0x1e90, 0x00000001);
1643 nv_mthd(dev, 0xa097, 0x1eb0, 0x00000001);
1644 nv_mthd(dev, 0xa097, 0x1ed0, 0x00000001);
1645 nv_mthd(dev, 0xa097, 0x1ef0, 0x00000001);
1646 nv_mthd(dev, 0xa097, 0x1e14, 0x00000002);
1647 nv_mthd(dev, 0xa097, 0x1e34, 0x00000002);
1648 nv_mthd(dev, 0xa097, 0x1e54, 0x00000002);
1649 nv_mthd(dev, 0xa097, 0x1e74, 0x00000002);
1650 nv_mthd(dev, 0xa097, 0x1e94, 0x00000002);
1651 nv_mthd(dev, 0xa097, 0x1eb4, 0x00000002);
1652 nv_mthd(dev, 0xa097, 0x1ed4, 0x00000002);
1653 nv_mthd(dev, 0xa097, 0x1ef4, 0x00000002);
1654 nv_mthd(dev, 0xa097, 0x1e18, 0x00000001);
1655 nv_mthd(dev, 0xa097, 0x1e38, 0x00000001);
1656 nv_mthd(dev, 0xa097, 0x1e58, 0x00000001);
1657 nv_mthd(dev, 0xa097, 0x1e78, 0x00000001);
1658 nv_mthd(dev, 0xa097, 0x1e98, 0x00000001);
1659 nv_mthd(dev, 0xa097, 0x1eb8, 0x00000001);
1660 nv_mthd(dev, 0xa097, 0x1ed8, 0x00000001);
1661 nv_mthd(dev, 0xa097, 0x1ef8, 0x00000001);
1662 nv_mthd(dev, 0xa097, 0x3400, 0x00000000);
1663 nv_mthd(dev, 0xa097, 0x3404, 0x00000000);
1664 nv_mthd(dev, 0xa097, 0x3408, 0x00000000);
1665 nv_mthd(dev, 0xa097, 0x340c, 0x00000000);
1666 nv_mthd(dev, 0xa097, 0x3410, 0x00000000);
1667 nv_mthd(dev, 0xa097, 0x3414, 0x00000000);
1668 nv_mthd(dev, 0xa097, 0x3418, 0x00000000);
1669 nv_mthd(dev, 0xa097, 0x341c, 0x00000000);
1670 nv_mthd(dev, 0xa097, 0x3420, 0x00000000);
1671 nv_mthd(dev, 0xa097, 0x3424, 0x00000000);
1672 nv_mthd(dev, 0xa097, 0x3428, 0x00000000);
1673 nv_mthd(dev, 0xa097, 0x342c, 0x00000000);
1674 nv_mthd(dev, 0xa097, 0x3430, 0x00000000);
1675 nv_mthd(dev, 0xa097, 0x3434, 0x00000000);
1676 nv_mthd(dev, 0xa097, 0x3438, 0x00000000);
1677 nv_mthd(dev, 0xa097, 0x343c, 0x00000000);
1678 nv_mthd(dev, 0xa097, 0x3440, 0x00000000);
1679 nv_mthd(dev, 0xa097, 0x3444, 0x00000000);
1680 nv_mthd(dev, 0xa097, 0x3448, 0x00000000);
1681 nv_mthd(dev, 0xa097, 0x344c, 0x00000000);
1682 nv_mthd(dev, 0xa097, 0x3450, 0x00000000);
1683 nv_mthd(dev, 0xa097, 0x3454, 0x00000000);
1684 nv_mthd(dev, 0xa097, 0x3458, 0x00000000);
1685 nv_mthd(dev, 0xa097, 0x345c, 0x00000000);
1686 nv_mthd(dev, 0xa097, 0x3460, 0x00000000);
1687 nv_mthd(dev, 0xa097, 0x3464, 0x00000000);
1688 nv_mthd(dev, 0xa097, 0x3468, 0x00000000);
1689 nv_mthd(dev, 0xa097, 0x346c, 0x00000000);
1690 nv_mthd(dev, 0xa097, 0x3470, 0x00000000);
1691 nv_mthd(dev, 0xa097, 0x3474, 0x00000000);
1692 nv_mthd(dev, 0xa097, 0x3478, 0x00000000);
1693 nv_mthd(dev, 0xa097, 0x347c, 0x00000000);
1694 nv_mthd(dev, 0xa097, 0x3480, 0x00000000);
1695 nv_mthd(dev, 0xa097, 0x3484, 0x00000000);
1696 nv_mthd(dev, 0xa097, 0x3488, 0x00000000);
1697 nv_mthd(dev, 0xa097, 0x348c, 0x00000000);
1698 nv_mthd(dev, 0xa097, 0x3490, 0x00000000);
1699 nv_mthd(dev, 0xa097, 0x3494, 0x00000000);
1700 nv_mthd(dev, 0xa097, 0x3498, 0x00000000);
1701 nv_mthd(dev, 0xa097, 0x349c, 0x00000000);
1702 nv_mthd(dev, 0xa097, 0x34a0, 0x00000000);
1703 nv_mthd(dev, 0xa097, 0x34a4, 0x00000000);
1704 nv_mthd(dev, 0xa097, 0x34a8, 0x00000000);
1705 nv_mthd(dev, 0xa097, 0x34ac, 0x00000000);
1706 nv_mthd(dev, 0xa097, 0x34b0, 0x00000000);
1707 nv_mthd(dev, 0xa097, 0x34b4, 0x00000000);
1708 nv_mthd(dev, 0xa097, 0x34b8, 0x00000000);
1709 nv_mthd(dev, 0xa097, 0x34bc, 0x00000000);
1710 nv_mthd(dev, 0xa097, 0x34c0, 0x00000000);
1711 nv_mthd(dev, 0xa097, 0x34c4, 0x00000000);
1712 nv_mthd(dev, 0xa097, 0x34c8, 0x00000000);
1713 nv_mthd(dev, 0xa097, 0x34cc, 0x00000000);
1714 nv_mthd(dev, 0xa097, 0x34d0, 0x00000000);
1715 nv_mthd(dev, 0xa097, 0x34d4, 0x00000000);
1716 nv_mthd(dev, 0xa097, 0x34d8, 0x00000000);
1717 nv_mthd(dev, 0xa097, 0x34dc, 0x00000000);
1718 nv_mthd(dev, 0xa097, 0x34e0, 0x00000000);
1719 nv_mthd(dev, 0xa097, 0x34e4, 0x00000000);
1720 nv_mthd(dev, 0xa097, 0x34e8, 0x00000000);
1721 nv_mthd(dev, 0xa097, 0x34ec, 0x00000000);
1722 nv_mthd(dev, 0xa097, 0x34f0, 0x00000000);
1723 nv_mthd(dev, 0xa097, 0x34f4, 0x00000000);
1724 nv_mthd(dev, 0xa097, 0x34f8, 0x00000000);
1725 nv_mthd(dev, 0xa097, 0x34fc, 0x00000000);
1726 nv_mthd(dev, 0xa097, 0x3500, 0x00000000);
1727 nv_mthd(dev, 0xa097, 0x3504, 0x00000000);
1728 nv_mthd(dev, 0xa097, 0x3508, 0x00000000);
1729 nv_mthd(dev, 0xa097, 0x350c, 0x00000000);
1730 nv_mthd(dev, 0xa097, 0x3510, 0x00000000);
1731 nv_mthd(dev, 0xa097, 0x3514, 0x00000000);
1732 nv_mthd(dev, 0xa097, 0x3518, 0x00000000);
1733 nv_mthd(dev, 0xa097, 0x351c, 0x00000000);
1734 nv_mthd(dev, 0xa097, 0x3520, 0x00000000);
1735 nv_mthd(dev, 0xa097, 0x3524, 0x00000000);
1736 nv_mthd(dev, 0xa097, 0x3528, 0x00000000);
1737 nv_mthd(dev, 0xa097, 0x352c, 0x00000000);
1738 nv_mthd(dev, 0xa097, 0x3530, 0x00000000);
1739 nv_mthd(dev, 0xa097, 0x3534, 0x00000000);
1740 nv_mthd(dev, 0xa097, 0x3538, 0x00000000);
1741 nv_mthd(dev, 0xa097, 0x353c, 0x00000000);
1742 nv_mthd(dev, 0xa097, 0x3540, 0x00000000);
1743 nv_mthd(dev, 0xa097, 0x3544, 0x00000000);
1744 nv_mthd(dev, 0xa097, 0x3548, 0x00000000);
1745 nv_mthd(dev, 0xa097, 0x354c, 0x00000000);
1746 nv_mthd(dev, 0xa097, 0x3550, 0x00000000);
1747 nv_mthd(dev, 0xa097, 0x3554, 0x00000000);
1748 nv_mthd(dev, 0xa097, 0x3558, 0x00000000);
1749 nv_mthd(dev, 0xa097, 0x355c, 0x00000000);
1750 nv_mthd(dev, 0xa097, 0x3560, 0x00000000);
1751 nv_mthd(dev, 0xa097, 0x3564, 0x00000000);
1752 nv_mthd(dev, 0xa097, 0x3568, 0x00000000);
1753 nv_mthd(dev, 0xa097, 0x356c, 0x00000000);
1754 nv_mthd(dev, 0xa097, 0x3570, 0x00000000);
1755 nv_mthd(dev, 0xa097, 0x3574, 0x00000000);
1756 nv_mthd(dev, 0xa097, 0x3578, 0x00000000);
1757 nv_mthd(dev, 0xa097, 0x357c, 0x00000000);
1758 nv_mthd(dev, 0xa097, 0x3580, 0x00000000);
1759 nv_mthd(dev, 0xa097, 0x3584, 0x00000000);
1760 nv_mthd(dev, 0xa097, 0x3588, 0x00000000);
1761 nv_mthd(dev, 0xa097, 0x358c, 0x00000000);
1762 nv_mthd(dev, 0xa097, 0x3590, 0x00000000);
1763 nv_mthd(dev, 0xa097, 0x3594, 0x00000000);
1764 nv_mthd(dev, 0xa097, 0x3598, 0x00000000);
1765 nv_mthd(dev, 0xa097, 0x359c, 0x00000000);
1766 nv_mthd(dev, 0xa097, 0x35a0, 0x00000000);
1767 nv_mthd(dev, 0xa097, 0x35a4, 0x00000000);
1768 nv_mthd(dev, 0xa097, 0x35a8, 0x00000000);
1769 nv_mthd(dev, 0xa097, 0x35ac, 0x00000000);
1770 nv_mthd(dev, 0xa097, 0x35b0, 0x00000000);
1771 nv_mthd(dev, 0xa097, 0x35b4, 0x00000000);
1772 nv_mthd(dev, 0xa097, 0x35b8, 0x00000000);
1773 nv_mthd(dev, 0xa097, 0x35bc, 0x00000000);
1774 nv_mthd(dev, 0xa097, 0x35c0, 0x00000000);
1775 nv_mthd(dev, 0xa097, 0x35c4, 0x00000000);
1776 nv_mthd(dev, 0xa097, 0x35c8, 0x00000000);
1777 nv_mthd(dev, 0xa097, 0x35cc, 0x00000000);
1778 nv_mthd(dev, 0xa097, 0x35d0, 0x00000000);
1779 nv_mthd(dev, 0xa097, 0x35d4, 0x00000000);
1780 nv_mthd(dev, 0xa097, 0x35d8, 0x00000000);
1781 nv_mthd(dev, 0xa097, 0x35dc, 0x00000000);
1782 nv_mthd(dev, 0xa097, 0x35e0, 0x00000000);
1783 nv_mthd(dev, 0xa097, 0x35e4, 0x00000000);
1784 nv_mthd(dev, 0xa097, 0x35e8, 0x00000000);
1785 nv_mthd(dev, 0xa097, 0x35ec, 0x00000000);
1786 nv_mthd(dev, 0xa097, 0x35f0, 0x00000000);
1787 nv_mthd(dev, 0xa097, 0x35f4, 0x00000000);
1788 nv_mthd(dev, 0xa097, 0x35f8, 0x00000000);
1789 nv_mthd(dev, 0xa097, 0x35fc, 0x00000000);
1790 nv_mthd(dev, 0xa097, 0x030c, 0x00000001);
1791 nv_mthd(dev, 0xa097, 0x1944, 0x00000000);
1792 nv_mthd(dev, 0xa097, 0x1514, 0x00000000);
1793 nv_mthd(dev, 0xa097, 0x0d68, 0x0000ffff);
1794 nv_mthd(dev, 0xa097, 0x121c, 0x0fac6881);
1795 nv_mthd(dev, 0xa097, 0x0fac, 0x00000001);
1796 nv_mthd(dev, 0xa097, 0x1538, 0x00000001);
1797 nv_mthd(dev, 0xa097, 0x0fe0, 0x00000000);
1798 nv_mthd(dev, 0xa097, 0x0fe4, 0x00000000);
1799 nv_mthd(dev, 0xa097, 0x0fe8, 0x00000014);
1800 nv_mthd(dev, 0xa097, 0x0fec, 0x00000040);
1801 nv_mthd(dev, 0xa097, 0x0ff0, 0x00000000);
1802 nv_mthd(dev, 0xa097, 0x179c, 0x00000000);
1803 nv_mthd(dev, 0xa097, 0x1228, 0x00000400);
1804 nv_mthd(dev, 0xa097, 0x122c, 0x00000300);
1805 nv_mthd(dev, 0xa097, 0x1230, 0x00010001);
1806 nv_mthd(dev, 0xa097, 0x07f8, 0x00000000);
1807 nv_mthd(dev, 0xa097, 0x15b4, 0x00000001);
1808 nv_mthd(dev, 0xa097, 0x15cc, 0x00000000);
1809 nv_mthd(dev, 0xa097, 0x1534, 0x00000000);
1810 nv_mthd(dev, 0xa097, 0x0fb0, 0x00000000);
1811 nv_mthd(dev, 0xa097, 0x15d0, 0x00000000);
1812 nv_mthd(dev, 0xa097, 0x153c, 0x00000000);
1813 nv_mthd(dev, 0xa097, 0x16b4, 0x00000003);
1814 nv_mthd(dev, 0xa097, 0x0fbc, 0x0000ffff);
1815 nv_mthd(dev, 0xa097, 0x0fc0, 0x0000ffff);
1816 nv_mthd(dev, 0xa097, 0x0fc4, 0x0000ffff);
1817 nv_mthd(dev, 0xa097, 0x0fc8, 0x0000ffff);
1818 nv_mthd(dev, 0xa097, 0x0df8, 0x00000000);
1819 nv_mthd(dev, 0xa097, 0x0dfc, 0x00000000);
1820 nv_mthd(dev, 0xa097, 0x1948, 0x00000000);
1821 nv_mthd(dev, 0xa097, 0x1970, 0x00000001);
1822 nv_mthd(dev, 0xa097, 0x161c, 0x000009f0);
1823 nv_mthd(dev, 0xa097, 0x0dcc, 0x00000010);
1824 nv_mthd(dev, 0xa097, 0x163c, 0x00000000);
1825 nv_mthd(dev, 0xa097, 0x15e4, 0x00000000);
1826 nv_mthd(dev, 0xa097, 0x1160, 0x25e00040);
1827 nv_mthd(dev, 0xa097, 0x1164, 0x25e00040);
1828 nv_mthd(dev, 0xa097, 0x1168, 0x25e00040);
1829 nv_mthd(dev, 0xa097, 0x116c, 0x25e00040);
1830 nv_mthd(dev, 0xa097, 0x1170, 0x25e00040);
1831 nv_mthd(dev, 0xa097, 0x1174, 0x25e00040);
1832 nv_mthd(dev, 0xa097, 0x1178, 0x25e00040);
1833 nv_mthd(dev, 0xa097, 0x117c, 0x25e00040);
1834 nv_mthd(dev, 0xa097, 0x1180, 0x25e00040);
1835 nv_mthd(dev, 0xa097, 0x1184, 0x25e00040);
1836 nv_mthd(dev, 0xa097, 0x1188, 0x25e00040);
1837 nv_mthd(dev, 0xa097, 0x118c, 0x25e00040);
1838 nv_mthd(dev, 0xa097, 0x1190, 0x25e00040);
1839 nv_mthd(dev, 0xa097, 0x1194, 0x25e00040);
1840 nv_mthd(dev, 0xa097, 0x1198, 0x25e00040);
1841 nv_mthd(dev, 0xa097, 0x119c, 0x25e00040);
1842 nv_mthd(dev, 0xa097, 0x11a0, 0x25e00040);
1843 nv_mthd(dev, 0xa097, 0x11a4, 0x25e00040);
1844 nv_mthd(dev, 0xa097, 0x11a8, 0x25e00040);
1845 nv_mthd(dev, 0xa097, 0x11ac, 0x25e00040);
1846 nv_mthd(dev, 0xa097, 0x11b0, 0x25e00040);
1847 nv_mthd(dev, 0xa097, 0x11b4, 0x25e00040);
1848 nv_mthd(dev, 0xa097, 0x11b8, 0x25e00040);
1849 nv_mthd(dev, 0xa097, 0x11bc, 0x25e00040);
1850 nv_mthd(dev, 0xa097, 0x11c0, 0x25e00040);
1851 nv_mthd(dev, 0xa097, 0x11c4, 0x25e00040);
1852 nv_mthd(dev, 0xa097, 0x11c8, 0x25e00040);
1853 nv_mthd(dev, 0xa097, 0x11cc, 0x25e00040);
1854 nv_mthd(dev, 0xa097, 0x11d0, 0x25e00040);
1855 nv_mthd(dev, 0xa097, 0x11d4, 0x25e00040);
1856 nv_mthd(dev, 0xa097, 0x11d8, 0x25e00040);
1857 nv_mthd(dev, 0xa097, 0x11dc, 0x25e00040);
1858 nv_mthd(dev, 0xa097, 0x1880, 0x00000000);
1859 nv_mthd(dev, 0xa097, 0x1884, 0x00000000);
1860 nv_mthd(dev, 0xa097, 0x1888, 0x00000000);
1861 nv_mthd(dev, 0xa097, 0x188c, 0x00000000);
1862 nv_mthd(dev, 0xa097, 0x1890, 0x00000000);
1863 nv_mthd(dev, 0xa097, 0x1894, 0x00000000);
1864 nv_mthd(dev, 0xa097, 0x1898, 0x00000000);
1865 nv_mthd(dev, 0xa097, 0x189c, 0x00000000);
1866 nv_mthd(dev, 0xa097, 0x18a0, 0x00000000);
1867 nv_mthd(dev, 0xa097, 0x18a4, 0x00000000);
1868 nv_mthd(dev, 0xa097, 0x18a8, 0x00000000);
1869 nv_mthd(dev, 0xa097, 0x18ac, 0x00000000);
1870 nv_mthd(dev, 0xa097, 0x18b0, 0x00000000);
1871 nv_mthd(dev, 0xa097, 0x18b4, 0x00000000);
1872 nv_mthd(dev, 0xa097, 0x18b8, 0x00000000);
1873 nv_mthd(dev, 0xa097, 0x18bc, 0x00000000);
1874 nv_mthd(dev, 0xa097, 0x18c0, 0x00000000);
1875 nv_mthd(dev, 0xa097, 0x18c4, 0x00000000);
1876 nv_mthd(dev, 0xa097, 0x18c8, 0x00000000);
1877 nv_mthd(dev, 0xa097, 0x18cc, 0x00000000);
1878 nv_mthd(dev, 0xa097, 0x18d0, 0x00000000);
1879 nv_mthd(dev, 0xa097, 0x18d4, 0x00000000);
1880 nv_mthd(dev, 0xa097, 0x18d8, 0x00000000);
1881 nv_mthd(dev, 0xa097, 0x18dc, 0x00000000);
1882 nv_mthd(dev, 0xa097, 0x18e0, 0x00000000);
1883 nv_mthd(dev, 0xa097, 0x18e4, 0x00000000);
1884 nv_mthd(dev, 0xa097, 0x18e8, 0x00000000);
1885 nv_mthd(dev, 0xa097, 0x18ec, 0x00000000);
1886 nv_mthd(dev, 0xa097, 0x18f0, 0x00000000);
1887 nv_mthd(dev, 0xa097, 0x18f4, 0x00000000);
1888 nv_mthd(dev, 0xa097, 0x18f8, 0x00000000);
1889 nv_mthd(dev, 0xa097, 0x18fc, 0x00000000);
1890 nv_mthd(dev, 0xa097, 0x0f84, 0x00000000);
1891 nv_mthd(dev, 0xa097, 0x0f88, 0x00000000);
1892 nv_mthd(dev, 0xa097, 0x17c8, 0x00000000);
1893 nv_mthd(dev, 0xa097, 0x17cc, 0x00000000);
1894 nv_mthd(dev, 0xa097, 0x17d0, 0x000000ff);
1895 nv_mthd(dev, 0xa097, 0x17d4, 0xffffffff);
1896 nv_mthd(dev, 0xa097, 0x17d8, 0x00000002);
1897 nv_mthd(dev, 0xa097, 0x17dc, 0x00000000);
1898 nv_mthd(dev, 0xa097, 0x15f4, 0x00000000);
1899 nv_mthd(dev, 0xa097, 0x15f8, 0x00000000);
1900 nv_mthd(dev, 0xa097, 0x1434, 0x00000000);
1901 nv_mthd(dev, 0xa097, 0x1438, 0x00000000);
1902 nv_mthd(dev, 0xa097, 0x0d74, 0x00000000);
1903 nv_mthd(dev, 0xa097, 0x0dec, 0x00000001);
1904 nv_mthd(dev, 0xa097, 0x13a4, 0x00000000);
1905 nv_mthd(dev, 0xa097, 0x1318, 0x00000001);
1906 nv_mthd(dev, 0xa097, 0x1644, 0x00000000);
1907 nv_mthd(dev, 0xa097, 0x0748, 0x00000000);
1908 nv_mthd(dev, 0xa097, 0x0de8, 0x00000000);
1909 nv_mthd(dev, 0xa097, 0x1648, 0x00000000);
1910 nv_mthd(dev, 0xa097, 0x12a4, 0x00000000);
1911 nv_mthd(dev, 0xa097, 0x1120, 0x00000000);
1912 nv_mthd(dev, 0xa097, 0x1124, 0x00000000);
1913 nv_mthd(dev, 0xa097, 0x1128, 0x00000000);
1914 nv_mthd(dev, 0xa097, 0x112c, 0x00000000);
1915 nv_mthd(dev, 0xa097, 0x1118, 0x00000000);
1916 nv_mthd(dev, 0xa097, 0x164c, 0x00000000);
1917 nv_mthd(dev, 0xa097, 0x1658, 0x00000000);
1918 nv_mthd(dev, 0xa097, 0x1910, 0x00000290);
1919 nv_mthd(dev, 0xa097, 0x1518, 0x00000000);
1920 nv_mthd(dev, 0xa097, 0x165c, 0x00000001);
1921 nv_mthd(dev, 0xa097, 0x1520, 0x00000000);
1922 nv_mthd(dev, 0xa097, 0x1604, 0x00000000);
1923 nv_mthd(dev, 0xa097, 0x1570, 0x00000000);
1924 nv_mthd(dev, 0xa097, 0x13b0, 0x3f800000);
1925 nv_mthd(dev, 0xa097, 0x13b4, 0x3f800000);
1926 nv_mthd(dev, 0xa097, 0x020c, 0x00000000);
1927 nv_mthd(dev, 0xa097, 0x1670, 0x30201000);
1928 nv_mthd(dev, 0xa097, 0x1674, 0x70605040);
1929 nv_mthd(dev, 0xa097, 0x1678, 0xb8a89888);
1930 nv_mthd(dev, 0xa097, 0x167c, 0xf8e8d8c8);
1931 nv_mthd(dev, 0xa097, 0x166c, 0x00000000);
1932 nv_mthd(dev, 0xa097, 0x1680, 0x00ffff00);
1933 nv_mthd(dev, 0xa097, 0x12d0, 0x00000003);
1934 nv_mthd(dev, 0xa097, 0x12d4, 0x00000002);
1935 nv_mthd(dev, 0xa097, 0x1684, 0x00000000);
1936 nv_mthd(dev, 0xa097, 0x1688, 0x00000000);
1937 nv_mthd(dev, 0xa097, 0x0dac, 0x00001b02);
1938 nv_mthd(dev, 0xa097, 0x0db0, 0x00001b02);
1939 nv_mthd(dev, 0xa097, 0x0db4, 0x00000000);
1940 nv_mthd(dev, 0xa097, 0x168c, 0x00000000);
1941 nv_mthd(dev, 0xa097, 0x15bc, 0x00000000);
1942 nv_mthd(dev, 0xa097, 0x156c, 0x00000000);
1943 nv_mthd(dev, 0xa097, 0x187c, 0x00000000);
1944 nv_mthd(dev, 0xa097, 0x1110, 0x00000001);
1945 nv_mthd(dev, 0xa097, 0x0dc0, 0x00000000);
1946 nv_mthd(dev, 0xa097, 0x0dc4, 0x00000000);
1947 nv_mthd(dev, 0xa097, 0x0dc8, 0x00000000);
1948 nv_mthd(dev, 0xa097, 0x1234, 0x00000000);
1949 nv_mthd(dev, 0xa097, 0x1690, 0x00000000);
1950 nv_mthd(dev, 0xa097, 0x12ac, 0x00000001);
1951 nv_mthd(dev, 0xa097, 0x0790, 0x00000000);
1952 nv_mthd(dev, 0xa097, 0x0794, 0x00000000);
1953 nv_mthd(dev, 0xa097, 0x0798, 0x00000000);
1954 nv_mthd(dev, 0xa097, 0x079c, 0x00000000);
1955 nv_mthd(dev, 0xa097, 0x07a0, 0x00000000);
1956 nv_mthd(dev, 0xa097, 0x077c, 0x00000000);
1957 nv_mthd(dev, 0xa097, 0x1000, 0x00000010);
1958 nv_mthd(dev, 0xa097, 0x10fc, 0x00000000);
1959 nv_mthd(dev, 0xa097, 0x1290, 0x00000000);
1960 nv_mthd(dev, 0xa097, 0x0218, 0x00000010);
1961 nv_mthd(dev, 0xa097, 0x12d8, 0x00000000);
1962 nv_mthd(dev, 0xa097, 0x12dc, 0x00000010);
1963 nv_mthd(dev, 0xa097, 0x0d94, 0x00000001);
1964 nv_mthd(dev, 0xa097, 0x155c, 0x00000000);
1965 nv_mthd(dev, 0xa097, 0x1560, 0x00000000);
1966 nv_mthd(dev, 0xa097, 0x1564, 0x00000fff);
1967 nv_mthd(dev, 0xa097, 0x1574, 0x00000000);
1968 nv_mthd(dev, 0xa097, 0x1578, 0x00000000);
1969 nv_mthd(dev, 0xa097, 0x157c, 0x000fffff);
1970 nv_mthd(dev, 0xa097, 0x1354, 0x00000000);
1971 nv_mthd(dev, 0xa097, 0x1610, 0x00000012);
1972 nv_mthd(dev, 0xa097, 0x1608, 0x00000000);
1973 nv_mthd(dev, 0xa097, 0x160c, 0x00000000);
1974 nv_mthd(dev, 0xa097, 0x260c, 0x00000000);
1975 nv_mthd(dev, 0xa097, 0x07ac, 0x00000000);
1976 nv_mthd(dev, 0xa097, 0x162c, 0x00000003);
1977 nv_mthd(dev, 0xa097, 0x0210, 0x00000000);
1978 nv_mthd(dev, 0xa097, 0x0320, 0x00000000);
1979 nv_mthd(dev, 0xa097, 0x0324, 0x3f800000);
1980 nv_mthd(dev, 0xa097, 0x0328, 0x3f800000);
1981 nv_mthd(dev, 0xa097, 0x032c, 0x3f800000);
1982 nv_mthd(dev, 0xa097, 0x0330, 0x3f800000);
1983 nv_mthd(dev, 0xa097, 0x0334, 0x3f800000);
1984 nv_mthd(dev, 0xa097, 0x0338, 0x3f800000);
1985 nv_mthd(dev, 0xa097, 0x0750, 0x00000000);
1986 nv_mthd(dev, 0xa097, 0x0760, 0x39291909);
1987 nv_mthd(dev, 0xa097, 0x0764, 0x79695949);
1988 nv_mthd(dev, 0xa097, 0x0768, 0xb9a99989);
1989 nv_mthd(dev, 0xa097, 0x076c, 0xf9e9d9c9);
1990 nv_mthd(dev, 0xa097, 0x0770, 0x30201000);
1991 nv_mthd(dev, 0xa097, 0x0774, 0x70605040);
1992 nv_mthd(dev, 0xa097, 0x0778, 0x00009080);
1993 nv_mthd(dev, 0xa097, 0x0780, 0x39291909);
1994 nv_mthd(dev, 0xa097, 0x0784, 0x79695949);
1995 nv_mthd(dev, 0xa097, 0x0788, 0xb9a99989);
1996 nv_mthd(dev, 0xa097, 0x078c, 0xf9e9d9c9);
1997 nv_mthd(dev, 0xa097, 0x07d0, 0x30201000);
1998 nv_mthd(dev, 0xa097, 0x07d4, 0x70605040);
1999 nv_mthd(dev, 0xa097, 0x07d8, 0x00009080);
2000 nv_mthd(dev, 0xa097, 0x037c, 0x00000001);
2001 nv_mthd(dev, 0xa097, 0x0740, 0x00000000);
2002 nv_mthd(dev, 0xa097, 0x0744, 0x00000000);
2003 nv_mthd(dev, 0xa097, 0x2600, 0x00000000);
2004 nv_mthd(dev, 0xa097, 0x1918, 0x00000000);
2005 nv_mthd(dev, 0xa097, 0x191c, 0x00000900);
2006 nv_mthd(dev, 0xa097, 0x1920, 0x00000405);
2007 nv_mthd(dev, 0xa097, 0x1308, 0x00000001);
2008 nv_mthd(dev, 0xa097, 0x1924, 0x00000000);
2009 nv_mthd(dev, 0xa097, 0x13ac, 0x00000000);
2010 nv_mthd(dev, 0xa097, 0x192c, 0x00000001);
2011 nv_mthd(dev, 0xa097, 0x193c, 0x00002c1c);
2012 nv_mthd(dev, 0xa097, 0x0d7c, 0x00000000);
2013 nv_mthd(dev, 0xa097, 0x0f8c, 0x00000000);
2014 nv_mthd(dev, 0xa097, 0x02c0, 0x00000001);
2015 nv_mthd(dev, 0xa097, 0x1510, 0x00000000);
2016 nv_mthd(dev, 0xa097, 0x1940, 0x00000000);
2017 nv_mthd(dev, 0xa097, 0x0ff4, 0x00000000);
2018 nv_mthd(dev, 0xa097, 0x0ff8, 0x00000000);
2019 nv_mthd(dev, 0xa097, 0x194c, 0x00000000);
2020 nv_mthd(dev, 0xa097, 0x1950, 0x00000000);
2021 nv_mthd(dev, 0xa097, 0x1968, 0x00000000);
2022 nv_mthd(dev, 0xa097, 0x1590, 0x0000003f);
2023 nv_mthd(dev, 0xa097, 0x07e8, 0x00000000);
2024 nv_mthd(dev, 0xa097, 0x07ec, 0x00000000);
2025 nv_mthd(dev, 0xa097, 0x07f0, 0x00000000);
2026 nv_mthd(dev, 0xa097, 0x07f4, 0x00000000);
2027 nv_mthd(dev, 0xa097, 0x196c, 0x00000011);
2028 nv_mthd(dev, 0xa097, 0x02e4, 0x0000b001);
2029 nv_mthd(dev, 0xa097, 0x036c, 0x00000000);
2030 nv_mthd(dev, 0xa097, 0x0370, 0x00000000);
2031 nv_mthd(dev, 0xa097, 0x197c, 0x00000000);
2032 nv_mthd(dev, 0xa097, 0x0fcc, 0x00000000);
2033 nv_mthd(dev, 0xa097, 0x0fd0, 0x00000000);
2034 nv_mthd(dev, 0xa097, 0x02d8, 0x00000040);
2035 nv_mthd(dev, 0xa097, 0x1980, 0x00000080);
2036 nv_mthd(dev, 0xa097, 0x1504, 0x00000080);
2037 nv_mthd(dev, 0xa097, 0x1984, 0x00000000);
2038 nv_mthd(dev, 0xa097, 0x0300, 0x00000001);
2039 nv_mthd(dev, 0xa097, 0x13a8, 0x00000000);
2040 nv_mthd(dev, 0xa097, 0x12ec, 0x00000000);
2041 nv_mthd(dev, 0xa097, 0x1310, 0x00000000);
2042 nv_mthd(dev, 0xa097, 0x1314, 0x00000001);
2043 nv_mthd(dev, 0xa097, 0x1380, 0x00000000);
2044 nv_mthd(dev, 0xa097, 0x1384, 0x00000001);
2045 nv_mthd(dev, 0xa097, 0x1388, 0x00000001);
2046 nv_mthd(dev, 0xa097, 0x138c, 0x00000001);
2047 nv_mthd(dev, 0xa097, 0x1390, 0x00000001);
2048 nv_mthd(dev, 0xa097, 0x1394, 0x00000000);
2049 nv_mthd(dev, 0xa097, 0x139c, 0x00000000);
2050 nv_mthd(dev, 0xa097, 0x1398, 0x00000000);
2051 nv_mthd(dev, 0xa097, 0x1594, 0x00000000);
2052 nv_mthd(dev, 0xa097, 0x1598, 0x00000001);
2053 nv_mthd(dev, 0xa097, 0x159c, 0x00000001);
2054 nv_mthd(dev, 0xa097, 0x15a0, 0x00000001);
2055 nv_mthd(dev, 0xa097, 0x15a4, 0x00000001);
2056 nv_mthd(dev, 0xa097, 0x0f54, 0x00000000);
2057 nv_mthd(dev, 0xa097, 0x0f58, 0x00000000);
2058 nv_mthd(dev, 0xa097, 0x0f5c, 0x00000000);
2059 nv_mthd(dev, 0xa097, 0x19bc, 0x00000000);
2060 nv_mthd(dev, 0xa097, 0x0f9c, 0x00000000);
2061 nv_mthd(dev, 0xa097, 0x0fa0, 0x00000000);
2062 nv_mthd(dev, 0xa097, 0x12cc, 0x00000000);
2063 nv_mthd(dev, 0xa097, 0x12e8, 0x00000000);
2064 nv_mthd(dev, 0xa097, 0x130c, 0x00000001);
2065 nv_mthd(dev, 0xa097, 0x1360, 0x00000000);
2066 nv_mthd(dev, 0xa097, 0x1364, 0x00000000);
2067 nv_mthd(dev, 0xa097, 0x1368, 0x00000000);
2068 nv_mthd(dev, 0xa097, 0x136c, 0x00000000);
2069 nv_mthd(dev, 0xa097, 0x1370, 0x00000000);
2070 nv_mthd(dev, 0xa097, 0x1374, 0x00000000);
2071 nv_mthd(dev, 0xa097, 0x1378, 0x00000000);
2072 nv_mthd(dev, 0xa097, 0x137c, 0x00000000);
2073 nv_mthd(dev, 0xa097, 0x133c, 0x00000001);
2074 nv_mthd(dev, 0xa097, 0x1340, 0x00000001);
2075 nv_mthd(dev, 0xa097, 0x1344, 0x00000002);
2076 nv_mthd(dev, 0xa097, 0x1348, 0x00000001);
2077 nv_mthd(dev, 0xa097, 0x134c, 0x00000001);
2078 nv_mthd(dev, 0xa097, 0x1350, 0x00000002);
2079 nv_mthd(dev, 0xa097, 0x1358, 0x00000001);
2080 nv_mthd(dev, 0xa097, 0x12e4, 0x00000000);
2081 nv_mthd(dev, 0xa097, 0x131c, 0x00000000);
2082 nv_mthd(dev, 0xa097, 0x1320, 0x00000000);
2083 nv_mthd(dev, 0xa097, 0x1324, 0x00000000);
2084 nv_mthd(dev, 0xa097, 0x1328, 0x00000000);
2085 nv_mthd(dev, 0xa097, 0x19c0, 0x00000000);
2086 nv_mthd(dev, 0xa097, 0x1140, 0x00000000);
2087 nv_mthd(dev, 0xa097, 0x19c4, 0x00000000);
2088 nv_mthd(dev, 0xa097, 0x19c8, 0x00001500);
2089 nv_mthd(dev, 0xa097, 0x135c, 0x00000000);
2090 nv_mthd(dev, 0xa097, 0x0f90, 0x00000000);
2091 nv_mthd(dev, 0xa097, 0x19e0, 0x00000001);
2092 nv_mthd(dev, 0xa097, 0x19e4, 0x00000001);
2093 nv_mthd(dev, 0xa097, 0x19e8, 0x00000001);
2094 nv_mthd(dev, 0xa097, 0x19ec, 0x00000001);
2095 nv_mthd(dev, 0xa097, 0x19f0, 0x00000001);
2096 nv_mthd(dev, 0xa097, 0x19f4, 0x00000001);
2097 nv_mthd(dev, 0xa097, 0x19f8, 0x00000001);
2098 nv_mthd(dev, 0xa097, 0x19fc, 0x00000001);
2099 nv_mthd(dev, 0xa097, 0x19cc, 0x00000001);
2100 nv_mthd(dev, 0xa097, 0x15b8, 0x00000000);
2101 nv_mthd(dev, 0xa097, 0x1a00, 0x00001111);
2102 nv_mthd(dev, 0xa097, 0x1a04, 0x00000000);
2103 nv_mthd(dev, 0xa097, 0x1a08, 0x00000000);
2104 nv_mthd(dev, 0xa097, 0x1a0c, 0x00000000);
2105 nv_mthd(dev, 0xa097, 0x1a10, 0x00000000);
2106 nv_mthd(dev, 0xa097, 0x1a14, 0x00000000);
2107 nv_mthd(dev, 0xa097, 0x1a18, 0x00000000);
2108 nv_mthd(dev, 0xa097, 0x1a1c, 0x00000000);
2109 nv_mthd(dev, 0xa097, 0x0d6c, 0xffff0000);
2110 nv_mthd(dev, 0xa097, 0x0d70, 0xffff0000);
2111 nv_mthd(dev, 0xa097, 0x10f8, 0x00001010);
2112 nv_mthd(dev, 0xa097, 0x0d80, 0x00000000);
2113 nv_mthd(dev, 0xa097, 0x0d84, 0x00000000);
2114 nv_mthd(dev, 0xa097, 0x0d88, 0x00000000);
2115 nv_mthd(dev, 0xa097, 0x0d8c, 0x00000000);
2116 nv_mthd(dev, 0xa097, 0x0d90, 0x00000000);
2117 nv_mthd(dev, 0xa097, 0x0da0, 0x00000000);
2118 nv_mthd(dev, 0xa097, 0x07a4, 0x00000000);
2119 nv_mthd(dev, 0xa097, 0x07a8, 0x00000000);
2120 nv_mthd(dev, 0xa097, 0x1508, 0x80000000);
2121 nv_mthd(dev, 0xa097, 0x150c, 0x40000000);
2122 nv_mthd(dev, 0xa097, 0x1668, 0x00000000);
2123 nv_mthd(dev, 0xa097, 0x0318, 0x00000008);
2124 nv_mthd(dev, 0xa097, 0x031c, 0x00000008);
2125 nv_mthd(dev, 0xa097, 0x0d9c, 0x00000001);
2126 nv_mthd(dev, 0xa097, 0x0374, 0x00000000);
2127 nv_mthd(dev, 0xa097, 0x0378, 0x00000020);
2128 nv_mthd(dev, 0xa097, 0x07dc, 0x00000000);
2129 nv_mthd(dev, 0xa097, 0x074c, 0x00000055);
2130 nv_mthd(dev, 0xa097, 0x1420, 0x00000003);
2131 nv_mthd(dev, 0xa097, 0x17bc, 0x00000000);
2132 nv_mthd(dev, 0xa097, 0x17c0, 0x00000000);
2133 nv_mthd(dev, 0xa097, 0x17c4, 0x00000001);
2134 nv_mthd(dev, 0xa097, 0x1008, 0x00000008);
2135 nv_mthd(dev, 0xa097, 0x100c, 0x00000040);
2136 nv_mthd(dev, 0xa097, 0x1010, 0x0000012c);
2137 nv_mthd(dev, 0xa097, 0x0d60, 0x00000040);
2138 nv_mthd(dev, 0xa097, 0x075c, 0x00000003);
2139 nv_mthd(dev, 0xa097, 0x1018, 0x00000020);
2140 nv_mthd(dev, 0xa097, 0x101c, 0x00000001);
2141 nv_mthd(dev, 0xa097, 0x1020, 0x00000020);
2142 nv_mthd(dev, 0xa097, 0x1024, 0x00000001);
2143 nv_mthd(dev, 0xa097, 0x1444, 0x00000000);
2144 nv_mthd(dev, 0xa097, 0x1448, 0x00000000);
2145 nv_mthd(dev, 0xa097, 0x144c, 0x00000000);
2146 nv_mthd(dev, 0xa097, 0x0360, 0x20164010);
2147 nv_mthd(dev, 0xa097, 0x0364, 0x00000020);
2148 nv_mthd(dev, 0xa097, 0x0368, 0x00000000);
2149 nv_mthd(dev, 0xa097, 0x0de4, 0x00000000);
2150 nv_mthd(dev, 0xa097, 0x0204, 0x00000006);
2151 nv_mthd(dev, 0xa097, 0x0208, 0x00000000);
2152 nv_mthd(dev, 0xa097, 0x02cc, 0x003fffff);
2153 nv_mthd(dev, 0xa097, 0x02d0, 0x003fffff);
2154 nv_mthd(dev, 0xa097, 0x1220, 0x00000005);
2155 nv_mthd(dev, 0xa097, 0x0fdc, 0x00000000);
2156 nv_mthd(dev, 0xa097, 0x0f98, 0x00400008);
2157 nv_mthd(dev, 0xa097, 0x1284, 0x08000080);
2158 nv_mthd(dev, 0xa097, 0x1450, 0x00400008);
2159 nv_mthd(dev, 0xa097, 0x1454, 0x08000080);
2160 nv_mthd(dev, 0xa097, 0x0214, 0x00000000);
2161}
2162
2163static void
2164nve0_grctx_generate_902d(struct drm_device *dev)
2165{
2166 nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
2167 nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
2168 nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
2169 nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
2170 nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
2171 nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
2172 nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
2173 nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
2174 nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
2175 nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
2176 nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
2177 nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
2178 nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
2179 nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
2180 nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
2181 nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
2182 nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
2183 nv_mthd(dev, 0x902d, 0x3410, 0x00000000);
2184}
2185
2186static void
2187nve0_graph_generate_unk40xx(struct drm_device *dev)
2188{
2189 nv_wr32(dev, 0x404010, 0x0);
2190 nv_wr32(dev, 0x404014, 0x0);
2191 nv_wr32(dev, 0x404018, 0x0);
2192 nv_wr32(dev, 0x40401c, 0x0);
2193 nv_wr32(dev, 0x404020, 0x0);
2194 nv_wr32(dev, 0x404024, 0xe000);
2195 nv_wr32(dev, 0x404028, 0x0);
2196 nv_wr32(dev, 0x4040a8, 0x0);
2197 nv_wr32(dev, 0x4040ac, 0x0);
2198 nv_wr32(dev, 0x4040b0, 0x0);
2199 nv_wr32(dev, 0x4040b4, 0x0);
2200 nv_wr32(dev, 0x4040b8, 0x0);
2201 nv_wr32(dev, 0x4040bc, 0x0);
2202 nv_wr32(dev, 0x4040c0, 0x0);
2203 nv_wr32(dev, 0x4040c4, 0x0);
2204 nv_wr32(dev, 0x4040c8, 0xf800008f);
2205 nv_wr32(dev, 0x4040d0, 0x0);
2206 nv_wr32(dev, 0x4040d4, 0x0);
2207 nv_wr32(dev, 0x4040d8, 0x0);
2208 nv_wr32(dev, 0x4040dc, 0x0);
2209 nv_wr32(dev, 0x4040e0, 0x0);
2210 nv_wr32(dev, 0x4040e4, 0x0);
2211 nv_wr32(dev, 0x4040e8, 0x1000);
2212 nv_wr32(dev, 0x4040f8, 0x0);
2213 nv_wr32(dev, 0x404130, 0x0);
2214 nv_wr32(dev, 0x404134, 0x0);
2215 nv_wr32(dev, 0x404138, 0x20000040);
2216 nv_wr32(dev, 0x404150, 0x2e);
2217 nv_wr32(dev, 0x404154, 0x400);
2218 nv_wr32(dev, 0x404158, 0x200);
2219 nv_wr32(dev, 0x404164, 0x55);
2220 nv_wr32(dev, 0x4041a0, 0x0);
2221 nv_wr32(dev, 0x4041a4, 0x0);
2222 nv_wr32(dev, 0x4041a8, 0x0);
2223 nv_wr32(dev, 0x4041ac, 0x0);
2224 nv_wr32(dev, 0x404200, 0x0);
2225 nv_wr32(dev, 0x404204, 0x0);
2226 nv_wr32(dev, 0x404208, 0x0);
2227 nv_wr32(dev, 0x40420c, 0x0);
2228}
2229
2230static void
2231nve0_graph_generate_unk44xx(struct drm_device *dev)
2232{
2233 nv_wr32(dev, 0x404404, 0x0);
2234 nv_wr32(dev, 0x404408, 0x0);
2235 nv_wr32(dev, 0x40440c, 0x0);
2236 nv_wr32(dev, 0x404410, 0x0);
2237 nv_wr32(dev, 0x404414, 0x0);
2238 nv_wr32(dev, 0x404418, 0x0);
2239 nv_wr32(dev, 0x40441c, 0x0);
2240 nv_wr32(dev, 0x404420, 0x0);
2241 nv_wr32(dev, 0x404424, 0x0);
2242 nv_wr32(dev, 0x404428, 0x0);
2243 nv_wr32(dev, 0x40442c, 0x0);
2244 nv_wr32(dev, 0x404430, 0x0);
2245 nv_wr32(dev, 0x404434, 0x0);
2246 nv_wr32(dev, 0x404438, 0x0);
2247 nv_wr32(dev, 0x404460, 0x0);
2248 nv_wr32(dev, 0x404464, 0x0);
2249 nv_wr32(dev, 0x404468, 0xffffff);
2250 nv_wr32(dev, 0x40446c, 0x0);
2251 nv_wr32(dev, 0x404480, 0x1);
2252 nv_wr32(dev, 0x404498, 0x1);
2253}
2254
2255static void
2256nve0_graph_generate_unk46xx(struct drm_device *dev)
2257{
2258 nv_wr32(dev, 0x404604, 0x14);
2259 nv_wr32(dev, 0x404608, 0x0);
2260 nv_wr32(dev, 0x40460c, 0x3fff);
2261 nv_wr32(dev, 0x404610, 0x100);
2262 nv_wr32(dev, 0x404618, 0x0);
2263 nv_wr32(dev, 0x40461c, 0x0);
2264 nv_wr32(dev, 0x404620, 0x0);
2265 nv_wr32(dev, 0x404624, 0x0);
2266 nv_wr32(dev, 0x40462c, 0x0);
2267 nv_wr32(dev, 0x404630, 0x0);
2268 nv_wr32(dev, 0x404640, 0x0);
2269 nv_wr32(dev, 0x404654, 0x0);
2270 nv_wr32(dev, 0x404660, 0x0);
2271 nv_wr32(dev, 0x404678, 0x0);
2272 nv_wr32(dev, 0x40467c, 0x2);
2273 nv_wr32(dev, 0x404680, 0x0);
2274 nv_wr32(dev, 0x404684, 0x0);
2275 nv_wr32(dev, 0x404688, 0x0);
2276 nv_wr32(dev, 0x40468c, 0x0);
2277 nv_wr32(dev, 0x404690, 0x0);
2278 nv_wr32(dev, 0x404694, 0x0);
2279 nv_wr32(dev, 0x404698, 0x0);
2280 nv_wr32(dev, 0x40469c, 0x0);
2281 nv_wr32(dev, 0x4046a0, 0x7f0080);
2282 nv_wr32(dev, 0x4046a4, 0x0);
2283 nv_wr32(dev, 0x4046a8, 0x0);
2284 nv_wr32(dev, 0x4046ac, 0x0);
2285 nv_wr32(dev, 0x4046b0, 0x0);
2286 nv_wr32(dev, 0x4046b4, 0x0);
2287 nv_wr32(dev, 0x4046b8, 0x0);
2288 nv_wr32(dev, 0x4046bc, 0x0);
2289 nv_wr32(dev, 0x4046c0, 0x0);
2290 nv_wr32(dev, 0x4046c8, 0x0);
2291 nv_wr32(dev, 0x4046cc, 0x0);
2292 nv_wr32(dev, 0x4046d0, 0x0);
2293}
2294
2295static void
2296nve0_graph_generate_unk47xx(struct drm_device *dev)
2297{
2298 nv_wr32(dev, 0x404700, 0x0);
2299 nv_wr32(dev, 0x404704, 0x0);
2300 nv_wr32(dev, 0x404708, 0x0);
2301 nv_wr32(dev, 0x404718, 0x0);
2302 nv_wr32(dev, 0x40471c, 0x0);
2303 nv_wr32(dev, 0x404720, 0x0);
2304 nv_wr32(dev, 0x404724, 0x0);
2305 nv_wr32(dev, 0x404728, 0x0);
2306 nv_wr32(dev, 0x40472c, 0x0);
2307 nv_wr32(dev, 0x404730, 0x0);
2308 nv_wr32(dev, 0x404734, 0x100);
2309 nv_wr32(dev, 0x404738, 0x0);
2310 nv_wr32(dev, 0x40473c, 0x0);
2311 nv_wr32(dev, 0x404744, 0x0);
2312 nv_wr32(dev, 0x404748, 0x0);
2313 nv_wr32(dev, 0x404754, 0x0);
2314}
2315
2316static void
2317nve0_graph_generate_unk58xx(struct drm_device *dev)
2318{
2319 nv_wr32(dev, 0x405800, 0xf8000bf);
2320 nv_wr32(dev, 0x405830, 0x2180648);
2321 nv_wr32(dev, 0x405834, 0x8000000);
2322 nv_wr32(dev, 0x405838, 0x0);
2323 nv_wr32(dev, 0x405854, 0x0);
2324 nv_wr32(dev, 0x405870, 0x1);
2325 nv_wr32(dev, 0x405874, 0x1);
2326 nv_wr32(dev, 0x405878, 0x1);
2327 nv_wr32(dev, 0x40587c, 0x1);
2328 nv_wr32(dev, 0x405a00, 0x0);
2329 nv_wr32(dev, 0x405a04, 0x0);
2330 nv_wr32(dev, 0x405a18, 0x0);
2331 nv_wr32(dev, 0x405b00, 0x0);
2332 nv_wr32(dev, 0x405b10, 0x1000);
2333}
2334
2335static void
2336nve0_graph_generate_unk60xx(struct drm_device *dev)
2337{
2338 nv_wr32(dev, 0x406020, 0x4103c1);
2339 nv_wr32(dev, 0x406028, 0x1);
2340 nv_wr32(dev, 0x40602c, 0x1);
2341 nv_wr32(dev, 0x406030, 0x1);
2342 nv_wr32(dev, 0x406034, 0x1);
2343}
2344
2345static void
2346nve0_graph_generate_unk64xx(struct drm_device *dev)
2347{
2348 nv_wr32(dev, 0x4064a8, 0x0);
2349 nv_wr32(dev, 0x4064ac, 0x3fff);
2350 nv_wr32(dev, 0x4064b4, 0x0);
2351 nv_wr32(dev, 0x4064b8, 0x0);
2352 nv_wr32(dev, 0x4064c0, 0x801a00f0);
2353 nv_wr32(dev, 0x4064c4, 0x192ffff);
2354 nv_wr32(dev, 0x4064c8, 0x1800600);
2355 nv_wr32(dev, 0x4064cc, 0x0);
2356 nv_wr32(dev, 0x4064d0, 0x0);
2357 nv_wr32(dev, 0x4064d4, 0x0);
2358 nv_wr32(dev, 0x4064d8, 0x0);
2359 nv_wr32(dev, 0x4064dc, 0x0);
2360 nv_wr32(dev, 0x4064e0, 0x0);
2361 nv_wr32(dev, 0x4064e4, 0x0);
2362 nv_wr32(dev, 0x4064e8, 0x0);
2363 nv_wr32(dev, 0x4064ec, 0x0);
2364 nv_wr32(dev, 0x4064fc, 0x22a);
2365}
2366
2367static void
2368nve0_graph_generate_unk70xx(struct drm_device *dev)
2369{
2370 nv_wr32(dev, 0x407040, 0x0);
2371}
2372
2373static void
2374nve0_graph_generate_unk78xx(struct drm_device *dev)
2375{
2376 nv_wr32(dev, 0x407804, 0x23);
2377 nv_wr32(dev, 0x40780c, 0xa418820);
2378 nv_wr32(dev, 0x407810, 0x62080e6);
2379 nv_wr32(dev, 0x407814, 0x20398a4);
2380 nv_wr32(dev, 0x407818, 0xe629062);
2381 nv_wr32(dev, 0x40781c, 0xa418820);
2382 nv_wr32(dev, 0x407820, 0xe6);
2383 nv_wr32(dev, 0x4078bc, 0x103);
2384}
2385
2386static void
2387nve0_graph_generate_unk80xx(struct drm_device *dev)
2388{
2389 nv_wr32(dev, 0x408000, 0x0);
2390 nv_wr32(dev, 0x408004, 0x0);
2391 nv_wr32(dev, 0x408008, 0x30);
2392 nv_wr32(dev, 0x40800c, 0x0);
2393 nv_wr32(dev, 0x408010, 0x0);
2394 nv_wr32(dev, 0x408014, 0x69);
2395 nv_wr32(dev, 0x408018, 0xe100e100);
2396 nv_wr32(dev, 0x408064, 0x0);
2397}
2398
2399static void
2400nve0_graph_generate_unk88xx(struct drm_device *dev)
2401{
2402 nv_wr32(dev, 0x408800, 0x2802a3c);
2403 nv_wr32(dev, 0x408804, 0x40);
2404 nv_wr32(dev, 0x408808, 0x1043e005);
2405 nv_wr32(dev, 0x408840, 0xb);
2406 nv_wr32(dev, 0x408900, 0x3080b801);
2407 nv_wr32(dev, 0x408904, 0x62000001);
2408 nv_wr32(dev, 0x408908, 0xc8102f);
2409 nv_wr32(dev, 0x408980, 0x11d);
2410}
2411
2412static void
2413nve0_graph_generate_gpc(struct drm_device *dev)
2414{
2415 nv_wr32(dev, 0x418380, 0x16);
2416 nv_wr32(dev, 0x418400, 0x38004e00);
2417 nv_wr32(dev, 0x418404, 0x71e0ffff);
2418 nv_wr32(dev, 0x41840c, 0x1008);
2419 nv_wr32(dev, 0x418410, 0xfff0fff);
2420 nv_wr32(dev, 0x418414, 0x2200fff);
2421 nv_wr32(dev, 0x418450, 0x0);
2422 nv_wr32(dev, 0x418454, 0x0);
2423 nv_wr32(dev, 0x418458, 0x0);
2424 nv_wr32(dev, 0x41845c, 0x0);
2425 nv_wr32(dev, 0x418460, 0x0);
2426 nv_wr32(dev, 0x418464, 0x0);
2427 nv_wr32(dev, 0x418468, 0x1);
2428 nv_wr32(dev, 0x41846c, 0x0);
2429 nv_wr32(dev, 0x418470, 0x0);
2430 nv_wr32(dev, 0x418600, 0x1f);
2431 nv_wr32(dev, 0x418684, 0xf);
2432 nv_wr32(dev, 0x418700, 0x2);
2433 nv_wr32(dev, 0x418704, 0x80);
2434 nv_wr32(dev, 0x418708, 0x0);
2435 nv_wr32(dev, 0x41870c, 0x0);
2436 nv_wr32(dev, 0x418710, 0x0);
2437 nv_wr32(dev, 0x418800, 0x7006860a);
2438 nv_wr32(dev, 0x418808, 0x0);
2439 nv_wr32(dev, 0x41880c, 0x0);
2440 nv_wr32(dev, 0x418810, 0x0);
2441 nv_wr32(dev, 0x418828, 0x44);
2442 nv_wr32(dev, 0x418830, 0x10000001);
2443 nv_wr32(dev, 0x4188d8, 0x8);
2444 nv_wr32(dev, 0x4188e0, 0x1000000);
2445 nv_wr32(dev, 0x4188e8, 0x0);
2446 nv_wr32(dev, 0x4188ec, 0x0);
2447 nv_wr32(dev, 0x4188f0, 0x0);
2448 nv_wr32(dev, 0x4188f4, 0x0);
2449 nv_wr32(dev, 0x4188f8, 0x0);
2450 nv_wr32(dev, 0x4188fc, 0x20100018);
2451 nv_wr32(dev, 0x41891c, 0xff00ff);
2452 nv_wr32(dev, 0x418924, 0x0);
2453 nv_wr32(dev, 0x418928, 0xffff00);
2454 nv_wr32(dev, 0x41892c, 0xff00);
2455 nv_wr32(dev, 0x418a00, 0x0);
2456 nv_wr32(dev, 0x418a04, 0x0);
2457 nv_wr32(dev, 0x418a08, 0x0);
2458 nv_wr32(dev, 0x418a0c, 0x10000);
2459 nv_wr32(dev, 0x418a10, 0x0);
2460 nv_wr32(dev, 0x418a14, 0x0);
2461 nv_wr32(dev, 0x418a18, 0x0);
2462 nv_wr32(dev, 0x418a20, 0x0);
2463 nv_wr32(dev, 0x418a24, 0x0);
2464 nv_wr32(dev, 0x418a28, 0x0);
2465 nv_wr32(dev, 0x418a2c, 0x10000);
2466 nv_wr32(dev, 0x418a30, 0x0);
2467 nv_wr32(dev, 0x418a34, 0x0);
2468 nv_wr32(dev, 0x418a38, 0x0);
2469 nv_wr32(dev, 0x418a40, 0x0);
2470 nv_wr32(dev, 0x418a44, 0x0);
2471 nv_wr32(dev, 0x418a48, 0x0);
2472 nv_wr32(dev, 0x418a4c, 0x10000);
2473 nv_wr32(dev, 0x418a50, 0x0);
2474 nv_wr32(dev, 0x418a54, 0x0);
2475 nv_wr32(dev, 0x418a58, 0x0);
2476 nv_wr32(dev, 0x418a60, 0x0);
2477 nv_wr32(dev, 0x418a64, 0x0);
2478 nv_wr32(dev, 0x418a68, 0x0);
2479 nv_wr32(dev, 0x418a6c, 0x10000);
2480 nv_wr32(dev, 0x418a70, 0x0);
2481 nv_wr32(dev, 0x418a74, 0x0);
2482 nv_wr32(dev, 0x418a78, 0x0);
2483 nv_wr32(dev, 0x418a80, 0x0);
2484 nv_wr32(dev, 0x418a84, 0x0);
2485 nv_wr32(dev, 0x418a88, 0x0);
2486 nv_wr32(dev, 0x418a8c, 0x10000);
2487 nv_wr32(dev, 0x418a90, 0x0);
2488 nv_wr32(dev, 0x418a94, 0x0);
2489 nv_wr32(dev, 0x418a98, 0x0);
2490 nv_wr32(dev, 0x418aa0, 0x0);
2491 nv_wr32(dev, 0x418aa4, 0x0);
2492 nv_wr32(dev, 0x418aa8, 0x0);
2493 nv_wr32(dev, 0x418aac, 0x10000);
2494 nv_wr32(dev, 0x418ab0, 0x0);
2495 nv_wr32(dev, 0x418ab4, 0x0);
2496 nv_wr32(dev, 0x418ab8, 0x0);
2497 nv_wr32(dev, 0x418ac0, 0x0);
2498 nv_wr32(dev, 0x418ac4, 0x0);
2499 nv_wr32(dev, 0x418ac8, 0x0);
2500 nv_wr32(dev, 0x418acc, 0x10000);
2501 nv_wr32(dev, 0x418ad0, 0x0);
2502 nv_wr32(dev, 0x418ad4, 0x0);
2503 nv_wr32(dev, 0x418ad8, 0x0);
2504 nv_wr32(dev, 0x418ae0, 0x0);
2505 nv_wr32(dev, 0x418ae4, 0x0);
2506 nv_wr32(dev, 0x418ae8, 0x0);
2507 nv_wr32(dev, 0x418aec, 0x10000);
2508 nv_wr32(dev, 0x418af0, 0x0);
2509 nv_wr32(dev, 0x418af4, 0x0);
2510 nv_wr32(dev, 0x418af8, 0x0);
2511 nv_wr32(dev, 0x418b00, 0x6);
2512 nv_wr32(dev, 0x418b08, 0xa418820);
2513 nv_wr32(dev, 0x418b0c, 0x62080e6);
2514 nv_wr32(dev, 0x418b10, 0x20398a4);
2515 nv_wr32(dev, 0x418b14, 0xe629062);
2516 nv_wr32(dev, 0x418b18, 0xa418820);
2517 nv_wr32(dev, 0x418b1c, 0xe6);
2518 nv_wr32(dev, 0x418bb8, 0x103);
2519 nv_wr32(dev, 0x418c08, 0x1);
2520 nv_wr32(dev, 0x418c10, 0x0);
2521 nv_wr32(dev, 0x418c14, 0x0);
2522 nv_wr32(dev, 0x418c18, 0x0);
2523 nv_wr32(dev, 0x418c1c, 0x0);
2524 nv_wr32(dev, 0x418c20, 0x0);
2525 nv_wr32(dev, 0x418c24, 0x0);
2526 nv_wr32(dev, 0x418c28, 0x0);
2527 nv_wr32(dev, 0x418c2c, 0x0);
2528 nv_wr32(dev, 0x418c40, 0xffffffff);
2529 nv_wr32(dev, 0x418c6c, 0x1);
2530 nv_wr32(dev, 0x418c80, 0x20200004);
2531 nv_wr32(dev, 0x418c8c, 0x1);
2532 nv_wr32(dev, 0x419000, 0x780);
2533 nv_wr32(dev, 0x419004, 0x0);
2534 nv_wr32(dev, 0x419008, 0x0);
2535 nv_wr32(dev, 0x419014, 0x4);
2536}
2537
2538static void
2539nve0_graph_generate_tpc(struct drm_device *dev)
2540{
2541 nv_wr32(dev, 0x419848, 0x0);
2542 nv_wr32(dev, 0x419864, 0x129);
2543 nv_wr32(dev, 0x419888, 0x0);
2544 nv_wr32(dev, 0x419a00, 0xf0);
2545 nv_wr32(dev, 0x419a04, 0x1);
2546 nv_wr32(dev, 0x419a08, 0x21);
2547 nv_wr32(dev, 0x419a0c, 0x20000);
2548 nv_wr32(dev, 0x419a10, 0x0);
2549 nv_wr32(dev, 0x419a14, 0x200);
2550 nv_wr32(dev, 0x419a1c, 0xc000);
2551 nv_wr32(dev, 0x419a20, 0x800);
2552 nv_wr32(dev, 0x419a30, 0x1);
2553 nv_wr32(dev, 0x419ac4, 0x37f440);
2554 nv_wr32(dev, 0x419c00, 0xa);
2555 nv_wr32(dev, 0x419c04, 0x80000006);
2556 nv_wr32(dev, 0x419c08, 0x2);
2557 nv_wr32(dev, 0x419c20, 0x0);
2558 nv_wr32(dev, 0x419c24, 0x84210);
2559 nv_wr32(dev, 0x419c28, 0x3efbefbe);
2560 nv_wr32(dev, 0x419ce8, 0x0);
2561 nv_wr32(dev, 0x419cf4, 0x3203);
2562 nv_wr32(dev, 0x419e04, 0x0);
2563 nv_wr32(dev, 0x419e08, 0x0);
2564 nv_wr32(dev, 0x419e0c, 0x0);
2565 nv_wr32(dev, 0x419e10, 0x402);
2566 nv_wr32(dev, 0x419e44, 0x13eff2);
2567 nv_wr32(dev, 0x419e48, 0x0);
2568 nv_wr32(dev, 0x419e4c, 0x7f);
2569 nv_wr32(dev, 0x419e50, 0x0);
2570 nv_wr32(dev, 0x419e54, 0x0);
2571 nv_wr32(dev, 0x419e58, 0x0);
2572 nv_wr32(dev, 0x419e5c, 0x0);
2573 nv_wr32(dev, 0x419e60, 0x0);
2574 nv_wr32(dev, 0x419e64, 0x0);
2575 nv_wr32(dev, 0x419e68, 0x0);
2576 nv_wr32(dev, 0x419e6c, 0x0);
2577 nv_wr32(dev, 0x419e70, 0x0);
2578 nv_wr32(dev, 0x419e74, 0x0);
2579 nv_wr32(dev, 0x419e78, 0x0);
2580 nv_wr32(dev, 0x419e7c, 0x0);
2581 nv_wr32(dev, 0x419e80, 0x0);
2582 nv_wr32(dev, 0x419e84, 0x0);
2583 nv_wr32(dev, 0x419e88, 0x0);
2584 nv_wr32(dev, 0x419e8c, 0x0);
2585 nv_wr32(dev, 0x419e90, 0x0);
2586 nv_wr32(dev, 0x419e94, 0x0);
2587 nv_wr32(dev, 0x419e98, 0x0);
2588 nv_wr32(dev, 0x419eac, 0x1fcf);
2589 nv_wr32(dev, 0x419eb0, 0xd3f);
2590 nv_wr32(dev, 0x419ec8, 0x1304f);
2591 nv_wr32(dev, 0x419f30, 0x0);
2592 nv_wr32(dev, 0x419f34, 0x0);
2593 nv_wr32(dev, 0x419f38, 0x0);
2594 nv_wr32(dev, 0x419f3c, 0x0);
2595 nv_wr32(dev, 0x419f40, 0x0);
2596 nv_wr32(dev, 0x419f44, 0x0);
2597 nv_wr32(dev, 0x419f48, 0x0);
2598 nv_wr32(dev, 0x419f4c, 0x0);
2599 nv_wr32(dev, 0x419f58, 0x0);
2600 nv_wr32(dev, 0x419f78, 0xb);
2601}
2602
2603static void
2604nve0_graph_generate_tpcunk(struct drm_device *dev)
2605{
2606 nv_wr32(dev, 0x41be24, 0x6);
2607 nv_wr32(dev, 0x41bec0, 0x12180000);
2608 nv_wr32(dev, 0x41bec4, 0x37f7f);
2609 nv_wr32(dev, 0x41bee4, 0x6480430);
2610 nv_wr32(dev, 0x41bf00, 0xa418820);
2611 nv_wr32(dev, 0x41bf04, 0x62080e6);
2612 nv_wr32(dev, 0x41bf08, 0x20398a4);
2613 nv_wr32(dev, 0x41bf0c, 0xe629062);
2614 nv_wr32(dev, 0x41bf10, 0xa418820);
2615 nv_wr32(dev, 0x41bf14, 0xe6);
2616 nv_wr32(dev, 0x41bfd0, 0x900103);
2617 nv_wr32(dev, 0x41bfe0, 0x400001);
2618 nv_wr32(dev, 0x41bfe4, 0x0);
2619}
2620
2621int
2622nve0_grctx_generate(struct nouveau_channel *chan)
2623{
2624 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
2625 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
2626 struct drm_device *dev = chan->dev;
2627 u32 data[6] = {}, data2[2] = {}, tmp;
2628 u32 tpc_set = 0, tpc_mask = 0;
2629 u8 tpcnr[GPC_MAX], a, b;
2630 u8 shift, ntpcv;
2631 int i, gpc, tpc, id;
2632
2633 nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
2634 nv_wr32(dev, 0x400204, 0x00000000);
2635 nv_wr32(dev, 0x400208, 0x00000000);
2636
2637 nve0_graph_generate_unk40xx(dev);
2638 nve0_graph_generate_unk44xx(dev);
2639 nve0_graph_generate_unk46xx(dev);
2640 nve0_graph_generate_unk47xx(dev);
2641 nve0_graph_generate_unk58xx(dev);
2642 nve0_graph_generate_unk60xx(dev);
2643 nve0_graph_generate_unk64xx(dev);
2644 nve0_graph_generate_unk70xx(dev);
2645 nve0_graph_generate_unk78xx(dev);
2646 nve0_graph_generate_unk80xx(dev);
2647 nve0_graph_generate_unk88xx(dev);
2648 nve0_graph_generate_gpc(dev);
2649 nve0_graph_generate_tpc(dev);
2650 nve0_graph_generate_tpcunk(dev);
2651
2652 nv_wr32(dev, 0x404154, 0x0);
2653
2654 for (i = 0; i < grch->mmio_nr * 8; i += 8) {
2655 u32 reg = nv_ro32(grch->mmio, i + 0);
2656 u32 val = nv_ro32(grch->mmio, i + 4);
2657 nv_wr32(dev, reg, val);
2658 }
2659
2660 nv_wr32(dev, 0x418c6c, 0x1);
2661 nv_wr32(dev, 0x41980c, 0x10);
2662 nv_wr32(dev, 0x41be08, 0x4);
2663 nv_wr32(dev, 0x4064c0, 0x801a00f0);
2664 nv_wr32(dev, 0x405800, 0xf8000bf);
2665 nv_wr32(dev, 0x419c00, 0xa);
2666
2667 for (tpc = 0, id = 0; tpc < 4; tpc++) {
2668 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
2669 if (tpc < priv->tpc_nr[gpc]) {
2670 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0698), id);
2671 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x04e8), id);
2672 nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
2673 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0088), id++);
2674 }
2675
2676 nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
2677 nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
2678 }
2679 }
2680
2681 tmp = 0;
2682 for (i = 0; i < priv->gpc_nr; i++)
2683 tmp |= priv->tpc_nr[i] << (i * 4);
2684 nv_wr32(dev, 0x406028, tmp);
2685 nv_wr32(dev, 0x405870, tmp);
2686
2687 nv_wr32(dev, 0x40602c, 0x0);
2688 nv_wr32(dev, 0x405874, 0x0);
2689 nv_wr32(dev, 0x406030, 0x0);
2690 nv_wr32(dev, 0x405878, 0x0);
2691 nv_wr32(dev, 0x406034, 0x0);
2692 nv_wr32(dev, 0x40587c, 0x0);
2693
2694 /* calculate first set of magics */
2695 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2696
2697 gpc = -1;
2698 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2699 do {
2700 gpc = (gpc + 1) % priv->gpc_nr;
2701 } while (!tpcnr[gpc]);
2702 tpcnr[gpc]--;
2703
2704 data[tpc / 6] |= gpc << ((tpc % 6) * 5);
2705 }
2706
2707 for (; tpc < 32; tpc++)
2708 data[tpc / 6] |= 7 << ((tpc % 6) * 5);
2709
2710 /* and the second... */
2711 shift = 0;
2712 ntpcv = priv->tpc_total;
2713 while (!(ntpcv & (1 << 4))) {
2714 ntpcv <<= 1;
2715 shift++;
2716 }
2717
2718 data2[0] = ntpcv << 16;
2719 data2[0] |= shift << 21;
2720 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
2721 data2[0] |= priv->tpc_total << 8;
2722 data2[0] |= priv->magic_not_rop_nr;
2723 for (i = 1; i < 7; i++)
2724 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
2725
2726 /* and write it all the various parts of PGRAPH */
2727 nv_wr32(dev, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
2728 for (i = 0; i < 6; i++)
2729 nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
2730
2731 nv_wr32(dev, 0x41bfd0, data2[0]);
2732 nv_wr32(dev, 0x41bfe4, data2[1]);
2733 for (i = 0; i < 6; i++)
2734 nv_wr32(dev, 0x41bf00 + (i * 4), data[i]);
2735
2736 nv_wr32(dev, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
2737 for (i = 0; i < 6; i++)
2738 nv_wr32(dev, 0x40780c + (i * 4), data[i]);
2739
2740
2741 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2742 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
2743 tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
2744
2745 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
2746 a = (i * (priv->tpc_total - 1)) / 32;
2747 if (a != b) {
2748 b = a;
2749 do {
2750 gpc = (gpc + 1) % priv->gpc_nr;
2751 } while (!tpcnr[gpc]);
2752 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
2753
2754 tpc_set |= 1 << ((gpc * 8) + tpc);
2755 }
2756
2757 nv_wr32(dev, 0x406800 + (i * 0x20), tpc_set);
2758 nv_wr32(dev, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
2759 }
2760
2761 for (i = 0; i < 8; i++)
2762 nv_wr32(dev, 0x4064d0 + (i * 0x04), 0x00000000);
2763
2764 nv_wr32(dev, 0x405b00, 0x201);
2765 nv_wr32(dev, 0x408850, 0x2);
2766 nv_wr32(dev, 0x408958, 0x2);
2767 nv_wr32(dev, 0x419f78, 0xa);
2768
2769 nve0_grctx_generate_icmd(dev);
2770 nve0_grctx_generate_a097(dev);
2771 nve0_grctx_generate_902d(dev);
2772
2773 nv_mask(dev, 0x000260, 0x00000001, 0x00000001);
2774 nv_wr32(dev, 0x418800, 0x7026860a); //XXX
2775 nv_wr32(dev, 0x41be10, 0x00bb8bc7); //XXX
2776 return 0;
2777}
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 2d98ff92f3ba..d568aa4b10d1 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -66,6 +66,13 @@ static int udl_get_modes(struct drm_connector *connector)
66static int udl_mode_valid(struct drm_connector *connector, 66static int udl_mode_valid(struct drm_connector *connector,
67 struct drm_display_mode *mode) 67 struct drm_display_mode *mode)
68{ 68{
69 struct udl_device *udl = connector->dev->dev_private;
70 if (!udl->sku_pixel_limit)
71 return 0;
72
73 if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
74 return MODE_VIRTUAL_Y;
75
69 return 0; 76 return 0;
70} 77}
71 78
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f2fb8f15e2f1..7e0743358dff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1018,7 +1018,7 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
1018 } 1018 }
1019 1019
1020 1020
1021 event = kzalloc(sizeof(event->event), GFP_KERNEL); 1021 event = kzalloc(sizeof(*event), GFP_KERNEL);
1022 if (unlikely(event == NULL)) { 1022 if (unlikely(event == NULL)) {
1023 DRM_ERROR("Failed to allocate an event.\n"); 1023 DRM_ERROR("Failed to allocate an event.\n");
1024 ret = -ENOMEM; 1024 ret = -ENOMEM;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b64502dfa9f4..e89daf1b21b4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -266,7 +266,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
266 266
267static int iommu_init_device(struct device *dev) 267static int iommu_init_device(struct device *dev)
268{ 268{
269 struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev); 269 struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
270 struct iommu_dev_data *dev_data; 270 struct iommu_dev_data *dev_data;
271 struct iommu_group *group; 271 struct iommu_group *group;
272 u16 alias; 272 u16 alias;
@@ -293,7 +293,9 @@ static int iommu_init_device(struct device *dev)
293 dev_data->alias_data = alias_data; 293 dev_data->alias_data = alias_data;
294 294
295 dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff); 295 dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
296 } else 296 }
297
298 if (dma_pdev == NULL)
297 dma_pdev = pci_dev_get(pdev); 299 dma_pdev = pci_dev_get(pdev);
298 300
299 /* Account for quirked devices */ 301 /* Account for quirked devices */
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d8abb90a6c2f..034233eefc82 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1555,6 +1555,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1555 unsigned long arg) 1555 unsigned long arg)
1556{ 1556{
1557 struct multipath *m = ti->private; 1557 struct multipath *m = ti->private;
1558 struct pgpath *pgpath;
1558 struct block_device *bdev; 1559 struct block_device *bdev;
1559 fmode_t mode; 1560 fmode_t mode;
1560 unsigned long flags; 1561 unsigned long flags;
@@ -1570,12 +1571,14 @@ again:
1570 if (!m->current_pgpath) 1571 if (!m->current_pgpath)
1571 __choose_pgpath(m, 0); 1572 __choose_pgpath(m, 0);
1572 1573
1573 if (m->current_pgpath) { 1574 pgpath = m->current_pgpath;
1574 bdev = m->current_pgpath->path.dev->bdev; 1575
1575 mode = m->current_pgpath->path.dev->mode; 1576 if (pgpath) {
1577 bdev = pgpath->path.dev->bdev;
1578 mode = pgpath->path.dev->mode;
1576 } 1579 }
1577 1580
1578 if (m->queue_io) 1581 if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
1579 r = -EAGAIN; 1582 r = -EAGAIN;
1580 else if (!bdev) 1583 else if (!bdev)
1581 r = -EIO; 1584 r = -EIO;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f90069029aae..100368eb7991 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1212,6 +1212,41 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1212 return &t->targets[(KEYS_PER_NODE * n) + k]; 1212 return &t->targets[(KEYS_PER_NODE * n) + k];
1213} 1213}
1214 1214
1215static int count_device(struct dm_target *ti, struct dm_dev *dev,
1216 sector_t start, sector_t len, void *data)
1217{
1218 unsigned *num_devices = data;
1219
1220 (*num_devices)++;
1221
1222 return 0;
1223}
1224
1225/*
1226 * Check whether a table has no data devices attached using each
1227 * target's iterate_devices method.
1228 * Returns false if the result is unknown because a target doesn't
1229 * support iterate_devices.
1230 */
1231bool dm_table_has_no_data_devices(struct dm_table *table)
1232{
1233 struct dm_target *uninitialized_var(ti);
1234 unsigned i = 0, num_devices = 0;
1235
1236 while (i < dm_table_get_num_targets(table)) {
1237 ti = dm_table_get_target(table, i++);
1238
1239 if (!ti->type->iterate_devices)
1240 return false;
1241
1242 ti->type->iterate_devices(ti, count_device, &num_devices);
1243 if (num_devices)
1244 return false;
1245 }
1246
1247 return true;
1248}
1249
1215/* 1250/*
1216 * Establish the new table's queue_limits and validate them. 1251 * Establish the new table's queue_limits and validate them.
1217 */ 1252 */
@@ -1354,17 +1389,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
1354 return q && blk_queue_nonrot(q); 1389 return q && blk_queue_nonrot(q);
1355} 1390}
1356 1391
1357static bool dm_table_is_nonrot(struct dm_table *t) 1392static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1393 sector_t start, sector_t len, void *data)
1394{
1395 struct request_queue *q = bdev_get_queue(dev->bdev);
1396
1397 return q && !blk_queue_add_random(q);
1398}
1399
1400static bool dm_table_all_devices_attribute(struct dm_table *t,
1401 iterate_devices_callout_fn func)
1358{ 1402{
1359 struct dm_target *ti; 1403 struct dm_target *ti;
1360 unsigned i = 0; 1404 unsigned i = 0;
1361 1405
1362 /* Ensure that all underlying device are non-rotational. */
1363 while (i < dm_table_get_num_targets(t)) { 1406 while (i < dm_table_get_num_targets(t)) {
1364 ti = dm_table_get_target(t, i++); 1407 ti = dm_table_get_target(t, i++);
1365 1408
1366 if (!ti->type->iterate_devices || 1409 if (!ti->type->iterate_devices ||
1367 !ti->type->iterate_devices(ti, device_is_nonrot, NULL)) 1410 !ti->type->iterate_devices(ti, func, NULL))
1368 return 0; 1411 return 0;
1369 } 1412 }
1370 1413
@@ -1396,7 +1439,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1396 if (!dm_table_discard_zeroes_data(t)) 1439 if (!dm_table_discard_zeroes_data(t))
1397 q->limits.discard_zeroes_data = 0; 1440 q->limits.discard_zeroes_data = 0;
1398 1441
1399 if (dm_table_is_nonrot(t)) 1442 /* Ensure that all underlying devices are non-rotational. */
1443 if (dm_table_all_devices_attribute(t, device_is_nonrot))
1400 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 1444 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
1401 else 1445 else
1402 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); 1446 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
@@ -1404,6 +1448,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1404 dm_table_set_integrity(t); 1448 dm_table_set_integrity(t);
1405 1449
1406 /* 1450 /*
1451 * Determine whether or not this queue's I/O timings contribute
1452 * to the entropy pool, Only request-based targets use this.
1453 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
1454 * have it set.
1455 */
1456 if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
1457 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
1458
1459 /*
1407 * QUEUE_FLAG_STACKABLE must be set after all queue settings are 1460 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
1408 * visible to other CPUs because, once the flag is set, incoming bios 1461 * visible to other CPUs because, once the flag is set, incoming bios
1409 * are processed by request-based dm, which refers to the queue 1462 * are processed by request-based dm, which refers to the queue
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index af1fc3b2c2ad..c29410af1e22 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -509,9 +509,9 @@ enum pool_mode {
509struct pool_features { 509struct pool_features {
510 enum pool_mode mode; 510 enum pool_mode mode;
511 511
512 unsigned zero_new_blocks:1; 512 bool zero_new_blocks:1;
513 unsigned discard_enabled:1; 513 bool discard_enabled:1;
514 unsigned discard_passdown:1; 514 bool discard_passdown:1;
515}; 515};
516 516
517struct thin_c; 517struct thin_c;
@@ -580,7 +580,8 @@ struct pool_c {
580 struct dm_target_callbacks callbacks; 580 struct dm_target_callbacks callbacks;
581 581
582 dm_block_t low_water_blocks; 582 dm_block_t low_water_blocks;
583 struct pool_features pf; 583 struct pool_features requested_pf; /* Features requested during table load */
584 struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
584}; 585};
585 586
586/* 587/*
@@ -1839,6 +1840,47 @@ static void __requeue_bios(struct pool *pool)
1839/*---------------------------------------------------------------- 1840/*----------------------------------------------------------------
1840 * Binding of control targets to a pool object 1841 * Binding of control targets to a pool object
1841 *--------------------------------------------------------------*/ 1842 *--------------------------------------------------------------*/
1843static bool data_dev_supports_discard(struct pool_c *pt)
1844{
1845 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1846
1847 return q && blk_queue_discard(q);
1848}
1849
1850/*
1851 * If discard_passdown was enabled verify that the data device
1852 * supports discards. Disable discard_passdown if not.
1853 */
1854static void disable_passdown_if_not_supported(struct pool_c *pt)
1855{
1856 struct pool *pool = pt->pool;
1857 struct block_device *data_bdev = pt->data_dev->bdev;
1858 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1859 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1860 const char *reason = NULL;
1861 char buf[BDEVNAME_SIZE];
1862
1863 if (!pt->adjusted_pf.discard_passdown)
1864 return;
1865
1866 if (!data_dev_supports_discard(pt))
1867 reason = "discard unsupported";
1868
1869 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1870 reason = "max discard sectors smaller than a block";
1871
1872 else if (data_limits->discard_granularity > block_size)
1873 reason = "discard granularity larger than a block";
1874
1875 else if (block_size & (data_limits->discard_granularity - 1))
1876 reason = "discard granularity not a factor of block size";
1877
1878 if (reason) {
1879 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1880 pt->adjusted_pf.discard_passdown = false;
1881 }
1882}
1883
1842static int bind_control_target(struct pool *pool, struct dm_target *ti) 1884static int bind_control_target(struct pool *pool, struct dm_target *ti)
1843{ 1885{
1844 struct pool_c *pt = ti->private; 1886 struct pool_c *pt = ti->private;
@@ -1847,31 +1889,16 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
1847 * We want to make sure that degraded pools are never upgraded. 1889 * We want to make sure that degraded pools are never upgraded.
1848 */ 1890 */
1849 enum pool_mode old_mode = pool->pf.mode; 1891 enum pool_mode old_mode = pool->pf.mode;
1850 enum pool_mode new_mode = pt->pf.mode; 1892 enum pool_mode new_mode = pt->adjusted_pf.mode;
1851 1893
1852 if (old_mode > new_mode) 1894 if (old_mode > new_mode)
1853 new_mode = old_mode; 1895 new_mode = old_mode;
1854 1896
1855 pool->ti = ti; 1897 pool->ti = ti;
1856 pool->low_water_blocks = pt->low_water_blocks; 1898 pool->low_water_blocks = pt->low_water_blocks;
1857 pool->pf = pt->pf; 1899 pool->pf = pt->adjusted_pf;
1858 set_pool_mode(pool, new_mode);
1859 1900
1860 /* 1901 set_pool_mode(pool, new_mode);
1861 * If discard_passdown was enabled verify that the data device
1862 * supports discards. Disable discard_passdown if not; otherwise
1863 * -EOPNOTSUPP will be returned.
1864 */
1865 /* FIXME: pull this out into a sep fn. */
1866 if (pt->pf.discard_passdown) {
1867 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1868 if (!q || !blk_queue_discard(q)) {
1869 char buf[BDEVNAME_SIZE];
1870 DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
1871 bdevname(pt->data_dev->bdev, buf));
1872 pool->pf.discard_passdown = 0;
1873 }
1874 }
1875 1902
1876 return 0; 1903 return 0;
1877} 1904}
@@ -1889,9 +1916,9 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1889static void pool_features_init(struct pool_features *pf) 1916static void pool_features_init(struct pool_features *pf)
1890{ 1917{
1891 pf->mode = PM_WRITE; 1918 pf->mode = PM_WRITE;
1892 pf->zero_new_blocks = 1; 1919 pf->zero_new_blocks = true;
1893 pf->discard_enabled = 1; 1920 pf->discard_enabled = true;
1894 pf->discard_passdown = 1; 1921 pf->discard_passdown = true;
1895} 1922}
1896 1923
1897static void __pool_destroy(struct pool *pool) 1924static void __pool_destroy(struct pool *pool)
@@ -2119,13 +2146,13 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2119 argc--; 2146 argc--;
2120 2147
2121 if (!strcasecmp(arg_name, "skip_block_zeroing")) 2148 if (!strcasecmp(arg_name, "skip_block_zeroing"))
2122 pf->zero_new_blocks = 0; 2149 pf->zero_new_blocks = false;
2123 2150
2124 else if (!strcasecmp(arg_name, "ignore_discard")) 2151 else if (!strcasecmp(arg_name, "ignore_discard"))
2125 pf->discard_enabled = 0; 2152 pf->discard_enabled = false;
2126 2153
2127 else if (!strcasecmp(arg_name, "no_discard_passdown")) 2154 else if (!strcasecmp(arg_name, "no_discard_passdown"))
2128 pf->discard_passdown = 0; 2155 pf->discard_passdown = false;
2129 2156
2130 else if (!strcasecmp(arg_name, "read_only")) 2157 else if (!strcasecmp(arg_name, "read_only"))
2131 pf->mode = PM_READ_ONLY; 2158 pf->mode = PM_READ_ONLY;
@@ -2259,8 +2286,9 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2259 pt->metadata_dev = metadata_dev; 2286 pt->metadata_dev = metadata_dev;
2260 pt->data_dev = data_dev; 2287 pt->data_dev = data_dev;
2261 pt->low_water_blocks = low_water_blocks; 2288 pt->low_water_blocks = low_water_blocks;
2262 pt->pf = pf; 2289 pt->adjusted_pf = pt->requested_pf = pf;
2263 ti->num_flush_requests = 1; 2290 ti->num_flush_requests = 1;
2291
2264 /* 2292 /*
2265 * Only need to enable discards if the pool should pass 2293 * Only need to enable discards if the pool should pass
2266 * them down to the data device. The thin device's discard 2294 * them down to the data device. The thin device's discard
@@ -2268,12 +2296,14 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2268 */ 2296 */
2269 if (pf.discard_enabled && pf.discard_passdown) { 2297 if (pf.discard_enabled && pf.discard_passdown) {
2270 ti->num_discard_requests = 1; 2298 ti->num_discard_requests = 1;
2299
2271 /* 2300 /*
2272 * Setting 'discards_supported' circumvents the normal 2301 * Setting 'discards_supported' circumvents the normal
2273 * stacking of discard limits (this keeps the pool and 2302 * stacking of discard limits (this keeps the pool and
2274 * thin devices' discard limits consistent). 2303 * thin devices' discard limits consistent).
2275 */ 2304 */
2276 ti->discards_supported = true; 2305 ti->discards_supported = true;
2306 ti->discard_zeroes_data_unsupported = true;
2277 } 2307 }
2278 ti->private = pt; 2308 ti->private = pt;
2279 2309
@@ -2703,7 +2733,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
2703 format_dev_t(buf2, pt->data_dev->bdev->bd_dev), 2733 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2704 (unsigned long)pool->sectors_per_block, 2734 (unsigned long)pool->sectors_per_block,
2705 (unsigned long long)pt->low_water_blocks); 2735 (unsigned long long)pt->low_water_blocks);
2706 emit_flags(&pt->pf, result, sz, maxlen); 2736 emit_flags(&pt->requested_pf, result, sz, maxlen);
2707 break; 2737 break;
2708 } 2738 }
2709 2739
@@ -2732,20 +2762,21 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2732 return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); 2762 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2733} 2763}
2734 2764
2735static void set_discard_limits(struct pool *pool, struct queue_limits *limits) 2765static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2736{ 2766{
2737 /* 2767 struct pool *pool = pt->pool;
2738 * FIXME: these limits may be incompatible with the pool's data device 2768 struct queue_limits *data_limits;
2739 */ 2769
2740 limits->max_discard_sectors = pool->sectors_per_block; 2770 limits->max_discard_sectors = pool->sectors_per_block;
2741 2771
2742 /* 2772 /*
2743 * This is just a hint, and not enforced. We have to cope with 2773 * discard_granularity is just a hint, and not enforced.
2744 * bios that cover a block partially. A discard that spans a block
2745 * boundary is not sent to this target.
2746 */ 2774 */
2747 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 2775 if (pt->adjusted_pf.discard_passdown) {
2748 limits->discard_zeroes_data = pool->pf.zero_new_blocks; 2776 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2777 limits->discard_granularity = data_limits->discard_granularity;
2778 } else
2779 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2749} 2780}
2750 2781
2751static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) 2782static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2755,15 +2786,25 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2755 2786
2756 blk_limits_io_min(limits, 0); 2787 blk_limits_io_min(limits, 0);
2757 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); 2788 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2758 if (pool->pf.discard_enabled) 2789
2759 set_discard_limits(pool, limits); 2790 /*
2791 * pt->adjusted_pf is a staging area for the actual features to use.
2792 * They get transferred to the live pool in bind_control_target()
2793 * called from pool_preresume().
2794 */
2795 if (!pt->adjusted_pf.discard_enabled)
2796 return;
2797
2798 disable_passdown_if_not_supported(pt);
2799
2800 set_discard_limits(pt, limits);
2760} 2801}
2761 2802
2762static struct target_type pool_target = { 2803static struct target_type pool_target = {
2763 .name = "thin-pool", 2804 .name = "thin-pool",
2764 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2805 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2765 DM_TARGET_IMMUTABLE, 2806 DM_TARGET_IMMUTABLE,
2766 .version = {1, 3, 0}, 2807 .version = {1, 4, 0},
2767 .module = THIS_MODULE, 2808 .module = THIS_MODULE,
2768 .ctr = pool_ctr, 2809 .ctr = pool_ctr,
2769 .dtr = pool_dtr, 2810 .dtr = pool_dtr,
@@ -3042,19 +3083,19 @@ static int thin_iterate_devices(struct dm_target *ti,
3042 return 0; 3083 return 0;
3043} 3084}
3044 3085
3086/*
3087 * A thin device always inherits its queue limits from its pool.
3088 */
3045static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) 3089static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
3046{ 3090{
3047 struct thin_c *tc = ti->private; 3091 struct thin_c *tc = ti->private;
3048 struct pool *pool = tc->pool;
3049 3092
3050 blk_limits_io_min(limits, 0); 3093 *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
3051 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3052 set_discard_limits(pool, limits);
3053} 3094}
3054 3095
3055static struct target_type thin_target = { 3096static struct target_type thin_target = {
3056 .name = "thin", 3097 .name = "thin",
3057 .version = {1, 3, 0}, 3098 .version = {1, 4, 0},
3058 .module = THIS_MODULE, 3099 .module = THIS_MODULE,
3059 .ctr = thin_ctr, 3100 .ctr = thin_ctr,
3060 .dtr = thin_dtr, 3101 .dtr = thin_dtr,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 254d19268ad2..892ae2766aa6 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
718 v->hash_dev_block_bits = ffs(num) - 1; 718 v->hash_dev_block_bits = ffs(num) - 1;
719 719
720 if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 || 720 if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
721 num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) != 721 (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
722 (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) { 722 >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
723 ti->error = "Invalid data blocks"; 723 ti->error = "Invalid data blocks";
724 r = -EINVAL; 724 r = -EINVAL;
725 goto bad; 725 goto bad;
@@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
733 } 733 }
734 734
735 if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 || 735 if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
736 num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) != 736 (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
737 (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) { 737 >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
738 ti->error = "Invalid hash start"; 738 ti->error = "Invalid hash start";
739 r = -EINVAL; 739 r = -EINVAL;
740 goto bad; 740 goto bad;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4e09b6ff5b49..67ffa391edcf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
865{ 865{
866 int r = error; 866 int r = error;
867 struct dm_rq_target_io *tio = clone->end_io_data; 867 struct dm_rq_target_io *tio = clone->end_io_data;
868 dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; 868 dm_request_endio_fn rq_end_io = NULL;
869 869
870 if (mapped && rq_end_io) 870 if (tio->ti) {
871 r = rq_end_io(tio->ti, clone, error, &tio->info); 871 rq_end_io = tio->ti->type->rq_end_io;
872
873 if (mapped && rq_end_io)
874 r = rq_end_io(tio->ti, clone, error, &tio->info);
875 }
872 876
873 if (r <= 0) 877 if (r <= 0)
874 /* The target wants to complete the I/O */ 878 /* The target wants to complete the I/O */
@@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
1588 int r, requeued = 0; 1592 int r, requeued = 0;
1589 struct dm_rq_target_io *tio = clone->end_io_data; 1593 struct dm_rq_target_io *tio = clone->end_io_data;
1590 1594
1591 /*
1592 * Hold the md reference here for the in-flight I/O.
1593 * We can't rely on the reference count by device opener,
1594 * because the device may be closed during the request completion
1595 * when all bios are completed.
1596 * See the comment in rq_completed() too.
1597 */
1598 dm_get(md);
1599
1600 tio->ti = ti; 1595 tio->ti = ti;
1601 r = ti->type->map_rq(ti, clone, &tio->info); 1596 r = ti->type->map_rq(ti, clone, &tio->info);
1602 switch (r) { 1597 switch (r) {
@@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
1628 return requeued; 1623 return requeued;
1629} 1624}
1630 1625
1626static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1627{
1628 struct request *clone;
1629
1630 blk_start_request(orig);
1631 clone = orig->special;
1632 atomic_inc(&md->pending[rq_data_dir(clone)]);
1633
1634 /*
1635 * Hold the md reference here for the in-flight I/O.
1636 * We can't rely on the reference count by device opener,
1637 * because the device may be closed during the request completion
1638 * when all bios are completed.
1639 * See the comment in rq_completed() too.
1640 */
1641 dm_get(md);
1642
1643 return clone;
1644}
1645
1631/* 1646/*
1632 * q->request_fn for request-based dm. 1647 * q->request_fn for request-based dm.
1633 * Called with the queue lock held. 1648 * Called with the queue lock held.
@@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q)
1657 pos = blk_rq_pos(rq); 1672 pos = blk_rq_pos(rq);
1658 1673
1659 ti = dm_table_find_target(map, pos); 1674 ti = dm_table_find_target(map, pos);
1660 BUG_ON(!dm_target_is_valid(ti)); 1675 if (!dm_target_is_valid(ti)) {
1676 /*
1677 * Must perform setup, that dm_done() requires,
1678 * before calling dm_kill_unmapped_request
1679 */
1680 DMERR_LIMIT("request attempted access beyond the end of device");
1681 clone = dm_start_request(md, rq);
1682 dm_kill_unmapped_request(clone, -EIO);
1683 continue;
1684 }
1661 1685
1662 if (ti->type->busy && ti->type->busy(ti)) 1686 if (ti->type->busy && ti->type->busy(ti))
1663 goto delay_and_out; 1687 goto delay_and_out;
1664 1688
1665 blk_start_request(rq); 1689 clone = dm_start_request(md, rq);
1666 clone = rq->special;
1667 atomic_inc(&md->pending[rq_data_dir(clone)]);
1668 1690
1669 spin_unlock(q->queue_lock); 1691 spin_unlock(q->queue_lock);
1670 if (map_request(ti, clone, md)) 1692 if (map_request(ti, clone, md))
@@ -1684,8 +1706,6 @@ delay_and_out:
1684 blk_delay_queue(q, HZ / 10); 1706 blk_delay_queue(q, HZ / 10);
1685out: 1707out:
1686 dm_table_put(map); 1708 dm_table_put(map);
1687
1688 return;
1689} 1709}
1690 1710
1691int dm_underlying_device_busy(struct request_queue *q) 1711int dm_underlying_device_busy(struct request_queue *q)
@@ -2409,7 +2429,7 @@ static void dm_queue_flush(struct mapped_device *md)
2409 */ 2429 */
2410struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) 2430struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2411{ 2431{
2412 struct dm_table *map = ERR_PTR(-EINVAL); 2432 struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
2413 struct queue_limits limits; 2433 struct queue_limits limits;
2414 int r; 2434 int r;
2415 2435
@@ -2419,6 +2439,19 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2419 if (!dm_suspended_md(md)) 2439 if (!dm_suspended_md(md))
2420 goto out; 2440 goto out;
2421 2441
2442 /*
2443 * If the new table has no data devices, retain the existing limits.
2444 * This helps multipath with queue_if_no_path if all paths disappear,
2445 * then new I/O is queued based on these limits, and then some paths
2446 * reappear.
2447 */
2448 if (dm_table_has_no_data_devices(table)) {
2449 live_map = dm_get_live_table(md);
2450 if (live_map)
2451 limits = md->queue->limits;
2452 dm_table_put(live_map);
2453 }
2454
2422 r = dm_calculate_queue_limits(table, &limits); 2455 r = dm_calculate_queue_limits(table, &limits);
2423 if (r) { 2456 if (r) {
2424 map = ERR_PTR(r); 2457 map = ERR_PTR(r);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 52eef493d266..6a99fefaa743 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -54,6 +54,7 @@ void dm_table_event_callback(struct dm_table *t,
54 void (*fn)(void *), void *context); 54 void (*fn)(void *), void *context);
55struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index); 55struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
56struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); 56struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
57bool dm_table_has_no_data_devices(struct dm_table *table);
57int dm_calculate_queue_limits(struct dm_table *table, 58int dm_calculate_queue_limits(struct dm_table *table,
58 struct queue_limits *limits); 59 struct queue_limits *limits);
59void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 60void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1c2eb38f3c51..0138a727c1f3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1512,14 +1512,16 @@ static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
1512 do { 1512 do {
1513 int n = conf->copies; 1513 int n = conf->copies;
1514 int cnt = 0; 1514 int cnt = 0;
1515 int this = first;
1515 while (n--) { 1516 while (n--) {
1516 if (conf->mirrors[first].rdev && 1517 if (conf->mirrors[this].rdev &&
1517 first != ignore) 1518 this != ignore)
1518 cnt++; 1519 cnt++;
1519 first = (first+1) % geo->raid_disks; 1520 this = (this+1) % geo->raid_disks;
1520 } 1521 }
1521 if (cnt == 0) 1522 if (cnt == 0)
1522 return 0; 1523 return 0;
1524 first = (first + geo->near_copies) % geo->raid_disks;
1523 } while (first != 0); 1525 } while (first != 0);
1524 return 1; 1526 return 1;
1525} 1527}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7031b865b3a0..0689173fd9f5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1591,6 +1591,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
1591 #ifdef CONFIG_MULTICORE_RAID456 1591 #ifdef CONFIG_MULTICORE_RAID456
1592 init_waitqueue_head(&nsh->ops.wait_for_ops); 1592 init_waitqueue_head(&nsh->ops.wait_for_ops);
1593 #endif 1593 #endif
1594 spin_lock_init(&nsh->stripe_lock);
1594 1595
1595 list_add(&nsh->lru, &newstripes); 1596 list_add(&nsh->lru, &newstripes);
1596 } 1597 }
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f2f482bec573..a6e74514e662 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
1123} 1123}
1124#endif 1124#endif
1125 1125
1126static inline unsigned long get_vm_size(struct vm_area_struct *vma)
1127{
1128 return vma->vm_end - vma->vm_start;
1129}
1130
1131static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
1132{
1133 return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
1134}
1135
1136/*
1137 * Set a new vm offset.
1138 *
1139 * Verify that the incoming offset really works as a page offset,
1140 * and that the offset and size fit in a resource_size_t.
1141 */
1142static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
1143{
1144 pgoff_t pgoff = off >> PAGE_SHIFT;
1145 if (off != (resource_size_t) pgoff << PAGE_SHIFT)
1146 return -EINVAL;
1147 if (off + get_vm_size(vma) - 1 < off)
1148 return -EINVAL;
1149 vma->vm_pgoff = pgoff;
1150 return 0;
1151}
1152
1126/* 1153/*
1127 * set up a mapping for shared memory segments 1154 * set up a mapping for shared memory segments
1128 */ 1155 */
@@ -1132,20 +1159,29 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
1132 struct mtd_file_info *mfi = file->private_data; 1159 struct mtd_file_info *mfi = file->private_data;
1133 struct mtd_info *mtd = mfi->mtd; 1160 struct mtd_info *mtd = mfi->mtd;
1134 struct map_info *map = mtd->priv; 1161 struct map_info *map = mtd->priv;
1135 unsigned long start; 1162 resource_size_t start, off;
1136 unsigned long off; 1163 unsigned long len, vma_len;
1137 u32 len;
1138 1164
1139 if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) { 1165 if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
1140 off = vma->vm_pgoff << PAGE_SHIFT; 1166 off = get_vm_offset(vma);
1141 start = map->phys; 1167 start = map->phys;
1142 len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); 1168 len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
1143 start &= PAGE_MASK; 1169 start &= PAGE_MASK;
1144 if ((vma->vm_end - vma->vm_start + off) > len) 1170 vma_len = get_vm_size(vma);
1171
1172 /* Overflow in off+len? */
1173 if (vma_len + off < off)
1174 return -EINVAL;
1175 /* Does it fit in the mapping? */
1176 if (vma_len + off > len)
1145 return -EINVAL; 1177 return -EINVAL;
1146 1178
1147 off += start; 1179 off += start;
1148 vma->vm_pgoff = off >> PAGE_SHIFT; 1180 /* Did that overflow? */
1181 if (off < start)
1182 return -EINVAL;
1183 if (set_vm_offset(vma, off) < 0)
1184 return -EINVAL;
1149 vma->vm_flags |= VM_IO | VM_RESERVED; 1185 vma->vm_flags |= VM_IO | VM_RESERVED;
1150 1186
1151#ifdef pgprot_noncached 1187#ifdef pgprot_noncached
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 79cebd8525ce..e48312f2305d 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8564,7 +8564,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8564 return 0; 8564 return 0;
8565 8565
8566error: 8566error:
8567 iounmap(bp->regview); 8567 pci_iounmap(pdev, bp->regview);
8568 pci_release_regions(pdev); 8568 pci_release_regions(pdev);
8569 pci_disable_device(pdev); 8569 pci_disable_device(pdev);
8570 pci_set_drvdata(pdev, NULL); 8570 pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index c42bbb16cdae..a688a2ddcfd6 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -722,10 +722,8 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
722 octeon_mgmt_adjust_link, 0, 722 octeon_mgmt_adjust_link, 0,
723 PHY_INTERFACE_MODE_MII); 723 PHY_INTERFACE_MODE_MII);
724 724
725 if (IS_ERR(p->phydev)) { 725 if (!p->phydev)
726 p->phydev = NULL;
727 return -1; 726 return -1;
728 }
729 727
730 phy_start_aneg(p->phydev); 728 phy_start_aneg(p->phydev);
731 729
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index e559dfa06d6a..6fa74d530e44 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1101,9 +1101,9 @@ static int pasemi_mac_phy_init(struct net_device *dev)
1101 phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0, 1101 phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
1102 PHY_INTERFACE_MODE_SGMII); 1102 PHY_INTERFACE_MODE_SGMII);
1103 1103
1104 if (IS_ERR(phydev)) { 1104 if (!phydev) {
1105 printk(KERN_ERR "%s: Could not attach to phy\n", dev->name); 1105 printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
1106 return PTR_ERR(phydev); 1106 return -ENODEV;
1107 } 1107 }
1108 1108
1109 mac->phydev = phydev; 1109 mac->phydev = phydev;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index b8ead696141e..2a179d087207 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -15,7 +15,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
15 15
16 do { 16 do {
17 /* give atleast 1ms for firmware to respond */ 17 /* give atleast 1ms for firmware to respond */
18 msleep(1); 18 mdelay(1);
19 19
20 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) 20 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
21 return QLCNIC_CDRP_RSP_TIMEOUT; 21 return QLCNIC_CDRP_RSP_TIMEOUT;
@@ -601,7 +601,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
601 qlcnic_fw_cmd_destroy_tx_ctx(adapter); 601 qlcnic_fw_cmd_destroy_tx_ctx(adapter);
602 602
603 /* Allow dma queues to drain after context reset */ 603 /* Allow dma queues to drain after context reset */
604 msleep(20); 604 mdelay(20);
605 } 605 }
606} 606}
607 607
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 2346b38b9837..799789518e87 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -229,3 +229,5 @@ static void __exit bcm87xx_exit(void)
229 ARRAY_SIZE(bcm87xx_driver)); 229 ARRAY_SIZE(bcm87xx_driver));
230} 230}
231module_exit(bcm87xx_exit); 231module_exit(bcm87xx_exit);
232
233MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index cf287e0eb408..2165d5fdb8c0 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -21,6 +21,12 @@
21#include <linux/phy.h> 21#include <linux/phy.h>
22#include <linux/micrel_phy.h> 22#include <linux/micrel_phy.h>
23 23
24/* Operation Mode Strap Override */
25#define MII_KSZPHY_OMSO 0x16
26#define KSZPHY_OMSO_B_CAST_OFF (1 << 9)
27#define KSZPHY_OMSO_RMII_OVERRIDE (1 << 1)
28#define KSZPHY_OMSO_MII_OVERRIDE (1 << 0)
29
24/* general Interrupt control/status reg in vendor specific block. */ 30/* general Interrupt control/status reg in vendor specific block. */
25#define MII_KSZPHY_INTCS 0x1B 31#define MII_KSZPHY_INTCS 0x1B
26#define KSZPHY_INTCS_JABBER (1 << 15) 32#define KSZPHY_INTCS_JABBER (1 << 15)
@@ -101,6 +107,13 @@ static int kszphy_config_init(struct phy_device *phydev)
101 return 0; 107 return 0;
102} 108}
103 109
110static int ksz8021_config_init(struct phy_device *phydev)
111{
112 const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
113 phy_write(phydev, MII_KSZPHY_OMSO, val);
114 return 0;
115}
116
104static int ks8051_config_init(struct phy_device *phydev) 117static int ks8051_config_init(struct phy_device *phydev)
105{ 118{
106 int regval; 119 int regval;
@@ -128,9 +141,22 @@ static struct phy_driver ksphy_driver[] = {
128 .config_intr = ks8737_config_intr, 141 .config_intr = ks8737_config_intr,
129 .driver = { .owner = THIS_MODULE,}, 142 .driver = { .owner = THIS_MODULE,},
130}, { 143}, {
131 .phy_id = PHY_ID_KS8041, 144 .phy_id = PHY_ID_KSZ8021,
145 .phy_id_mask = 0x00ffffff,
146 .name = "Micrel KSZ8021",
147 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
148 SUPPORTED_Asym_Pause),
149 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
150 .config_init = ksz8021_config_init,
151 .config_aneg = genphy_config_aneg,
152 .read_status = genphy_read_status,
153 .ack_interrupt = kszphy_ack_interrupt,
154 .config_intr = kszphy_config_intr,
155 .driver = { .owner = THIS_MODULE,},
156}, {
157 .phy_id = PHY_ID_KSZ8041,
132 .phy_id_mask = 0x00fffff0, 158 .phy_id_mask = 0x00fffff0,
133 .name = "Micrel KS8041", 159 .name = "Micrel KSZ8041",
134 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause 160 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
135 | SUPPORTED_Asym_Pause), 161 | SUPPORTED_Asym_Pause),
136 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 162 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -141,9 +167,9 @@ static struct phy_driver ksphy_driver[] = {
141 .config_intr = kszphy_config_intr, 167 .config_intr = kszphy_config_intr,
142 .driver = { .owner = THIS_MODULE,}, 168 .driver = { .owner = THIS_MODULE,},
143}, { 169}, {
144 .phy_id = PHY_ID_KS8051, 170 .phy_id = PHY_ID_KSZ8051,
145 .phy_id_mask = 0x00fffff0, 171 .phy_id_mask = 0x00fffff0,
146 .name = "Micrel KS8051", 172 .name = "Micrel KSZ8051",
147 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause 173 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
148 | SUPPORTED_Asym_Pause), 174 | SUPPORTED_Asym_Pause),
149 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 175 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -154,8 +180,8 @@ static struct phy_driver ksphy_driver[] = {
154 .config_intr = kszphy_config_intr, 180 .config_intr = kszphy_config_intr,
155 .driver = { .owner = THIS_MODULE,}, 181 .driver = { .owner = THIS_MODULE,},
156}, { 182}, {
157 .phy_id = PHY_ID_KS8001, 183 .phy_id = PHY_ID_KSZ8001,
158 .name = "Micrel KS8001 or KS8721", 184 .name = "Micrel KSZ8001 or KS8721",
159 .phy_id_mask = 0x00ffffff, 185 .phy_id_mask = 0x00ffffff,
160 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), 186 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
161 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 187 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -201,10 +227,11 @@ MODULE_LICENSE("GPL");
201 227
202static struct mdio_device_id __maybe_unused micrel_tbl[] = { 228static struct mdio_device_id __maybe_unused micrel_tbl[] = {
203 { PHY_ID_KSZ9021, 0x000ffffe }, 229 { PHY_ID_KSZ9021, 0x000ffffe },
204 { PHY_ID_KS8001, 0x00ffffff }, 230 { PHY_ID_KSZ8001, 0x00ffffff },
205 { PHY_ID_KS8737, 0x00fffff0 }, 231 { PHY_ID_KS8737, 0x00fffff0 },
206 { PHY_ID_KS8041, 0x00fffff0 }, 232 { PHY_ID_KSZ8021, 0x00ffffff },
207 { PHY_ID_KS8051, 0x00fffff0 }, 233 { PHY_ID_KSZ8041, 0x00fffff0 },
234 { PHY_ID_KSZ8051, 0x00fffff0 },
208 { } 235 { }
209}; 236};
210 237
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 6d6192316b30..88e3991464e7 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -56,6 +56,32 @@ static int smsc_phy_config_init(struct phy_device *phydev)
56 return smsc_phy_ack_interrupt (phydev); 56 return smsc_phy_ack_interrupt (phydev);
57} 57}
58 58
59static int lan87xx_config_init(struct phy_device *phydev)
60{
61 /*
62 * Make sure the EDPWRDOWN bit is NOT set. Setting this bit on
63 * LAN8710/LAN8720 PHY causes the PHY to misbehave, likely due
64 * to a bug on the chip.
65 *
66 * When the system is powered on with the network cable being
67 * disconnected all the way until after ifconfig ethX up is
68 * issued for the LAN port with this PHY, connecting the cable
69 * afterwards does not cause LINK change detection, while the
70 * expected behavior is the Link UP being detected.
71 */
72 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
73 if (rc < 0)
74 return rc;
75
76 rc &= ~MII_LAN83C185_EDPWRDOWN;
77
78 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc);
79 if (rc < 0)
80 return rc;
81
82 return smsc_phy_ack_interrupt(phydev);
83}
84
59static int lan911x_config_init(struct phy_device *phydev) 85static int lan911x_config_init(struct phy_device *phydev)
60{ 86{
61 return smsc_phy_ack_interrupt(phydev); 87 return smsc_phy_ack_interrupt(phydev);
@@ -162,7 +188,7 @@ static struct phy_driver smsc_phy_driver[] = {
162 /* basic functions */ 188 /* basic functions */
163 .config_aneg = genphy_config_aneg, 189 .config_aneg = genphy_config_aneg,
164 .read_status = genphy_read_status, 190 .read_status = genphy_read_status,
165 .config_init = smsc_phy_config_init, 191 .config_init = lan87xx_config_init,
166 192
167 /* IRQ related */ 193 /* IRQ related */
168 .ack_interrupt = smsc_phy_ack_interrupt, 194 .ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index cbf7047decc0..20f31d0d1536 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -570,7 +570,7 @@ static int pppoe_release(struct socket *sock)
570 570
571 po = pppox_sk(sk); 571 po = pppox_sk(sk);
572 572
573 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { 573 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
574 dev_put(po->pppoe_dev); 574 dev_put(po->pppoe_dev);
575 po->pppoe_dev = NULL; 575 po->pppoe_dev = NULL;
576 } 576 }
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 341b65dbbcd3..f8cd61f449a4 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -848,7 +848,7 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
848} 848}
849#endif 849#endif
850 850
851static void __team_port_change_check(struct team_port *port, bool linkup); 851static void __team_port_change_port_added(struct team_port *port, bool linkup);
852 852
853static int team_port_add(struct team *team, struct net_device *port_dev) 853static int team_port_add(struct team *team, struct net_device *port_dev)
854{ 854{
@@ -948,7 +948,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
948 team_port_enable(team, port); 948 team_port_enable(team, port);
949 list_add_tail_rcu(&port->list, &team->port_list); 949 list_add_tail_rcu(&port->list, &team->port_list);
950 __team_compute_features(team); 950 __team_compute_features(team);
951 __team_port_change_check(port, !!netif_carrier_ok(port_dev)); 951 __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
952 __team_options_change_check(team); 952 __team_options_change_check(team);
953 953
954 netdev_info(dev, "Port device %s added\n", portname); 954 netdev_info(dev, "Port device %s added\n", portname);
@@ -983,6 +983,8 @@ err_set_mtu:
983 return err; 983 return err;
984} 984}
985 985
986static void __team_port_change_port_removed(struct team_port *port);
987
986static int team_port_del(struct team *team, struct net_device *port_dev) 988static int team_port_del(struct team *team, struct net_device *port_dev)
987{ 989{
988 struct net_device *dev = team->dev; 990 struct net_device *dev = team->dev;
@@ -999,8 +1001,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
999 __team_option_inst_mark_removed_port(team, port); 1001 __team_option_inst_mark_removed_port(team, port);
1000 __team_options_change_check(team); 1002 __team_options_change_check(team);
1001 __team_option_inst_del_port(team, port); 1003 __team_option_inst_del_port(team, port);
1002 port->removed = true; 1004 __team_port_change_port_removed(port);
1003 __team_port_change_check(port, false);
1004 team_port_disable(team, port); 1005 team_port_disable(team, port);
1005 list_del_rcu(&port->list); 1006 list_del_rcu(&port->list);
1006 netdev_rx_handler_unregister(port_dev); 1007 netdev_rx_handler_unregister(port_dev);
@@ -1652,8 +1653,8 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1652 1653
1653 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, 1654 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
1654 &team_nl_family, 0, TEAM_CMD_NOOP); 1655 &team_nl_family, 0, TEAM_CMD_NOOP);
1655 if (IS_ERR(hdr)) { 1656 if (!hdr) {
1656 err = PTR_ERR(hdr); 1657 err = -EMSGSIZE;
1657 goto err_msg_put; 1658 goto err_msg_put;
1658 } 1659 }
1659 1660
@@ -1847,8 +1848,8 @@ start_again:
1847 1848
1848 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI, 1849 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
1849 TEAM_CMD_OPTIONS_GET); 1850 TEAM_CMD_OPTIONS_GET);
1850 if (IS_ERR(hdr)) 1851 if (!hdr)
1851 return PTR_ERR(hdr); 1852 return -EMSGSIZE;
1852 1853
1853 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) 1854 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1854 goto nla_put_failure; 1855 goto nla_put_failure;
@@ -2067,8 +2068,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
2067 2068
2068 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, 2069 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
2069 TEAM_CMD_PORT_LIST_GET); 2070 TEAM_CMD_PORT_LIST_GET);
2070 if (IS_ERR(hdr)) 2071 if (!hdr)
2071 return PTR_ERR(hdr); 2072 return -EMSGSIZE;
2072 2073
2073 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) 2074 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2074 goto nla_put_failure; 2075 goto nla_put_failure;
@@ -2251,13 +2252,11 @@ static void __team_options_change_check(struct team *team)
2251} 2252}
2252 2253
2253/* rtnl lock is held */ 2254/* rtnl lock is held */
2254static void __team_port_change_check(struct team_port *port, bool linkup) 2255
2256static void __team_port_change_send(struct team_port *port, bool linkup)
2255{ 2257{
2256 int err; 2258 int err;
2257 2259
2258 if (!port->removed && port->state.linkup == linkup)
2259 return;
2260
2261 port->changed = true; 2260 port->changed = true;
2262 port->state.linkup = linkup; 2261 port->state.linkup = linkup;
2263 team_refresh_port_linkup(port); 2262 team_refresh_port_linkup(port);
@@ -2282,6 +2281,23 @@ send_event:
2282 2281
2283} 2282}
2284 2283
2284static void __team_port_change_check(struct team_port *port, bool linkup)
2285{
2286 if (port->state.linkup != linkup)
2287 __team_port_change_send(port, linkup);
2288}
2289
2290static void __team_port_change_port_added(struct team_port *port, bool linkup)
2291{
2292 __team_port_change_send(port, linkup);
2293}
2294
2295static void __team_port_change_port_removed(struct team_port *port)
2296{
2297 port->removed = true;
2298 __team_port_change_send(port, false);
2299}
2300
2285static void team_port_change_check(struct team_port *port, bool linkup) 2301static void team_port_change_check(struct team_port *port, bool linkup)
2286{ 2302{
2287 struct team *team = port->team; 2303 struct team *team = port->team;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f5ab6e613ec8..376143e8a1aa 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1253,6 +1253,7 @@ static struct usb_driver smsc75xx_driver = {
1253 .probe = usbnet_probe, 1253 .probe = usbnet_probe,
1254 .suspend = usbnet_suspend, 1254 .suspend = usbnet_suspend,
1255 .resume = usbnet_resume, 1255 .resume = usbnet_resume,
1256 .reset_resume = usbnet_resume,
1256 .disconnect = usbnet_disconnect, 1257 .disconnect = usbnet_disconnect,
1257 .disable_hub_initiated_lpm = 1, 1258 .disable_hub_initiated_lpm = 1,
1258}; 1259};
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 1e86ea2266d4..dbeebef562d5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1442,6 +1442,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1442 return err; 1442 return err;
1443 1443
1444err_free_irq: 1444err_free_irq:
1445 trans_pcie->irq_requested = false;
1445 free_irq(trans_pcie->irq, trans); 1446 free_irq(trans_pcie->irq, trans);
1446error: 1447error:
1447 iwl_free_isr_ict(trans); 1448 iwl_free_isr_ict(trans);
diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/sh/pfc/pinctrl.c
index a3ac39b79192..0646bf6e7889 100644
--- a/drivers/sh/pfc/pinctrl.c
+++ b/drivers/sh/pfc/pinctrl.c
@@ -208,6 +208,8 @@ static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
208 208
209 break; 209 break;
210 case PINMUX_TYPE_GPIO: 210 case PINMUX_TYPE_GPIO:
211 case PINMUX_TYPE_INPUT:
212 case PINMUX_TYPE_OUTPUT:
211 break; 213 break;
212 default: 214 default:
213 pr_err("Unsupported mux type (%d), bailing...\n", pinmux_type); 215 pr_err("Unsupported mux type (%d), bailing...\n", pinmux_type);
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index d95696584762..3440812b4a84 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
624 /* print devices for all busses */ 624 /* print devices for all busses */
625 list_for_each_entry(bus, &usb_bus_list, bus_list) { 625 list_for_each_entry(bus, &usb_bus_list, bus_list) {
626 /* recurse through all children of the root hub */ 626 /* recurse through all children of the root hub */
627 if (!bus->root_hub) 627 if (!bus_to_hcd(bus)->rh_registered)
628 continue; 628 continue;
629 usb_lock_device(bus->root_hub); 629 usb_lock_device(bus->root_hub);
630 ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos, 630 ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index bc84106ac057..75ba2091f9b4 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1011,10 +1011,7 @@ static int register_root_hub(struct usb_hcd *hcd)
1011 if (retval) { 1011 if (retval) {
1012 dev_err (parent_dev, "can't register root hub for %s, %d\n", 1012 dev_err (parent_dev, "can't register root hub for %s, %d\n",
1013 dev_name(&usb_dev->dev), retval); 1013 dev_name(&usb_dev->dev), retval);
1014 } 1014 } else {
1015 mutex_unlock(&usb_bus_list_lock);
1016
1017 if (retval == 0) {
1018 spin_lock_irq (&hcd_root_hub_lock); 1015 spin_lock_irq (&hcd_root_hub_lock);
1019 hcd->rh_registered = 1; 1016 hcd->rh_registered = 1;
1020 spin_unlock_irq (&hcd_root_hub_lock); 1017 spin_unlock_irq (&hcd_root_hub_lock);
@@ -1023,6 +1020,7 @@ static int register_root_hub(struct usb_hcd *hcd)
1023 if (HCD_DEAD(hcd)) 1020 if (HCD_DEAD(hcd))
1024 usb_hc_died (hcd); /* This time clean up */ 1021 usb_hc_died (hcd); /* This time clean up */
1025 } 1022 }
1023 mutex_unlock(&usb_bus_list_lock);
1026 1024
1027 return retval; 1025 return retval;
1028} 1026}
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index aaa8d2bce217..0bf72f943b00 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -467,7 +467,8 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
467 /* From the GPIO notifying the over-current situation, find 467 /* From the GPIO notifying the over-current situation, find
468 * out the corresponding port */ 468 * out the corresponding port */
469 at91_for_each_port(port) { 469 at91_for_each_port(port) {
470 if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) { 470 if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
471 gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
471 gpio = pdata->overcurrent_pin[port]; 472 gpio = pdata->overcurrent_pin[port];
472 break; 473 break;
473 } 474 }
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 211a4920b88a..d8dedc7d3910 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -76,9 +76,24 @@ static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
76 schedule_work(&virqfd->inject); 76 schedule_work(&virqfd->inject);
77 } 77 }
78 78
79 if (flags & POLLHUP) 79 if (flags & POLLHUP) {
80 /* The eventfd is closing, detach from VFIO */ 80 unsigned long flags;
81 virqfd_deactivate(virqfd); 81 spin_lock_irqsave(&virqfd->vdev->irqlock, flags);
82
83 /*
84 * The eventfd is closing, if the virqfd has not yet been
85 * queued for release, as determined by testing whether the
86 * vdev pointer to it is still valid, queue it now. As
87 * with kvm irqfds, we know we won't race against the virqfd
88 * going away because we hold wqh->lock to get here.
89 */
90 if (*(virqfd->pvirqfd) == virqfd) {
91 *(virqfd->pvirqfd) = NULL;
92 virqfd_deactivate(virqfd);
93 }
94
95 spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
96 }
82 97
83 return 0; 98 return 0;
84} 99}
@@ -93,7 +108,6 @@ static void virqfd_ptable_queue_proc(struct file *file,
93static void virqfd_shutdown(struct work_struct *work) 108static void virqfd_shutdown(struct work_struct *work)
94{ 109{
95 struct virqfd *virqfd = container_of(work, struct virqfd, shutdown); 110 struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
96 struct virqfd **pvirqfd = virqfd->pvirqfd;
97 u64 cnt; 111 u64 cnt;
98 112
99 eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt); 113 eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
@@ -101,7 +115,6 @@ static void virqfd_shutdown(struct work_struct *work)
101 eventfd_ctx_put(virqfd->eventfd); 115 eventfd_ctx_put(virqfd->eventfd);
102 116
103 kfree(virqfd); 117 kfree(virqfd);
104 *pvirqfd = NULL;
105} 118}
106 119
107static void virqfd_inject(struct work_struct *work) 120static void virqfd_inject(struct work_struct *work)
@@ -122,15 +135,11 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
122 int ret = 0; 135 int ret = 0;
123 unsigned int events; 136 unsigned int events;
124 137
125 if (*pvirqfd)
126 return -EBUSY;
127
128 virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL); 138 virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
129 if (!virqfd) 139 if (!virqfd)
130 return -ENOMEM; 140 return -ENOMEM;
131 141
132 virqfd->pvirqfd = pvirqfd; 142 virqfd->pvirqfd = pvirqfd;
133 *pvirqfd = virqfd;
134 virqfd->vdev = vdev; 143 virqfd->vdev = vdev;
135 virqfd->handler = handler; 144 virqfd->handler = handler;
136 virqfd->thread = thread; 145 virqfd->thread = thread;
@@ -154,6 +163,23 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
154 virqfd->eventfd = ctx; 163 virqfd->eventfd = ctx;
155 164
156 /* 165 /*
166 * virqfds can be released by closing the eventfd or directly
167 * through ioctl. These are both done through a workqueue, so
168 * we update the pointer to the virqfd under lock to avoid
169 * pushing multiple jobs to release the same virqfd.
170 */
171 spin_lock_irq(&vdev->irqlock);
172
173 if (*pvirqfd) {
174 spin_unlock_irq(&vdev->irqlock);
175 ret = -EBUSY;
176 goto fail;
177 }
178 *pvirqfd = virqfd;
179
180 spin_unlock_irq(&vdev->irqlock);
181
182 /*
157 * Install our own custom wake-up handling so we are notified via 183 * Install our own custom wake-up handling so we are notified via
158 * a callback whenever someone signals the underlying eventfd. 184 * a callback whenever someone signals the underlying eventfd.
159 */ 185 */
@@ -187,19 +213,29 @@ fail:
187 fput(file); 213 fput(file);
188 214
189 kfree(virqfd); 215 kfree(virqfd);
190 *pvirqfd = NULL;
191 216
192 return ret; 217 return ret;
193} 218}
194 219
195static void virqfd_disable(struct virqfd *virqfd) 220static void virqfd_disable(struct vfio_pci_device *vdev,
221 struct virqfd **pvirqfd)
196{ 222{
197 if (!virqfd) 223 unsigned long flags;
198 return; 224
225 spin_lock_irqsave(&vdev->irqlock, flags);
226
227 if (*pvirqfd) {
228 virqfd_deactivate(*pvirqfd);
229 *pvirqfd = NULL;
230 }
199 231
200 virqfd_deactivate(virqfd); 232 spin_unlock_irqrestore(&vdev->irqlock, flags);
201 233
202 /* Block until we know all outstanding shutdown jobs have completed. */ 234 /*
235 * Block until we know all outstanding shutdown jobs have completed.
236 * Even if we don't queue the job, flush the wq to be sure it's
237 * been released.
238 */
203 flush_workqueue(vfio_irqfd_cleanup_wq); 239 flush_workqueue(vfio_irqfd_cleanup_wq);
204} 240}
205 241
@@ -392,8 +428,8 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
392static void vfio_intx_disable(struct vfio_pci_device *vdev) 428static void vfio_intx_disable(struct vfio_pci_device *vdev)
393{ 429{
394 vfio_intx_set_signal(vdev, -1); 430 vfio_intx_set_signal(vdev, -1);
395 virqfd_disable(vdev->ctx[0].unmask); 431 virqfd_disable(vdev, &vdev->ctx[0].unmask);
396 virqfd_disable(vdev->ctx[0].mask); 432 virqfd_disable(vdev, &vdev->ctx[0].mask);
397 vdev->irq_type = VFIO_PCI_NUM_IRQS; 433 vdev->irq_type = VFIO_PCI_NUM_IRQS;
398 vdev->num_ctx = 0; 434 vdev->num_ctx = 0;
399 kfree(vdev->ctx); 435 kfree(vdev->ctx);
@@ -539,8 +575,8 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
539 vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); 575 vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
540 576
541 for (i = 0; i < vdev->num_ctx; i++) { 577 for (i = 0; i < vdev->num_ctx; i++) {
542 virqfd_disable(vdev->ctx[i].unmask); 578 virqfd_disable(vdev, &vdev->ctx[i].unmask);
543 virqfd_disable(vdev->ctx[i].mask); 579 virqfd_disable(vdev, &vdev->ctx[i].mask);
544 } 580 }
545 581
546 if (msix) { 582 if (msix) {
@@ -577,7 +613,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
577 vfio_send_intx_eventfd, NULL, 613 vfio_send_intx_eventfd, NULL,
578 &vdev->ctx[0].unmask, fd); 614 &vdev->ctx[0].unmask, fd);
579 615
580 virqfd_disable(vdev->ctx[0].unmask); 616 virqfd_disable(vdev, &vdev->ctx[0].unmask);
581 } 617 }
582 618
583 return 0; 619 return 0;
diff --git a/fs/dcache.c b/fs/dcache.c
index 16521a9f2038..693f95bf1cae 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1134,6 +1134,8 @@ positive:
1134 return 1; 1134 return 1;
1135 1135
1136rename_retry: 1136rename_retry:
1137 if (locked)
1138 goto again;
1137 locked = 1; 1139 locked = 1;
1138 write_seqlock(&rename_lock); 1140 write_seqlock(&rename_lock);
1139 goto again; 1141 goto again;
@@ -1141,7 +1143,7 @@ rename_retry:
1141EXPORT_SYMBOL(have_submounts); 1143EXPORT_SYMBOL(have_submounts);
1142 1144
1143/* 1145/*
1144 * Search the dentry child list for the specified parent, 1146 * Search the dentry child list of the specified parent,
1145 * and move any unused dentries to the end of the unused 1147 * and move any unused dentries to the end of the unused
1146 * list for prune_dcache(). We descend to the next level 1148 * list for prune_dcache(). We descend to the next level
1147 * whenever the d_subdirs list is non-empty and continue 1149 * whenever the d_subdirs list is non-empty and continue
@@ -1236,6 +1238,8 @@ out:
1236rename_retry: 1238rename_retry:
1237 if (found) 1239 if (found)
1238 return found; 1240 return found;
1241 if (locked)
1242 goto again;
1239 locked = 1; 1243 locked = 1;
1240 write_seqlock(&rename_lock); 1244 write_seqlock(&rename_lock);
1241 goto again; 1245 goto again;
@@ -3035,6 +3039,8 @@ resume:
3035 return; 3039 return;
3036 3040
3037rename_retry: 3041rename_retry:
3042 if (locked)
3043 goto again;
3038 locked = 1; 3044 locked = 1;
3039 write_seqlock(&rename_lock); 3045 write_seqlock(&rename_lock);
3040 goto again; 3046 goto again;
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index fb1a2bedbe97..8d80c990dffd 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -289,7 +289,6 @@ static void nlmsvc_free_block(struct kref *kref)
289 dprintk("lockd: freeing block %p...\n", block); 289 dprintk("lockd: freeing block %p...\n", block);
290 290
291 /* Remove block from file's list of blocks */ 291 /* Remove block from file's list of blocks */
292 mutex_lock(&file->f_mutex);
293 list_del_init(&block->b_flist); 292 list_del_init(&block->b_flist);
294 mutex_unlock(&file->f_mutex); 293 mutex_unlock(&file->f_mutex);
295 294
@@ -303,7 +302,7 @@ static void nlmsvc_free_block(struct kref *kref)
303static void nlmsvc_release_block(struct nlm_block *block) 302static void nlmsvc_release_block(struct nlm_block *block)
304{ 303{
305 if (block != NULL) 304 if (block != NULL)
306 kref_put(&block->b_count, nlmsvc_free_block); 305 kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
307} 306}
308 307
309/* 308/*
diff --git a/fs/namespace.c b/fs/namespace.c
index 4d31f73e2561..7bdf7907413f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1886,8 +1886,14 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
1886 return err; 1886 return err;
1887 1887
1888 err = -EINVAL; 1888 err = -EINVAL;
1889 if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(real_mount(path->mnt))) 1889 if (unlikely(!check_mnt(real_mount(path->mnt)))) {
1890 goto unlock; 1890 /* that's acceptable only for automounts done in private ns */
1891 if (!(mnt_flags & MNT_SHRINKABLE))
1892 goto unlock;
1893 /* ... and for those we'd better have mountpoint still alive */
1894 if (!real_mount(path->mnt)->mnt_ns)
1895 goto unlock;
1896 }
1891 1897
1892 /* Refuse the same filesystem on the same mount point */ 1898 /* Refuse the same filesystem on the same mount point */
1893 err = -EBUSY; 1899 err = -EBUSY;
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 991ef01cd77e..3748ec92dcbc 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -691,9 +691,11 @@ __SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
691#define __NR_process_vm_writev 271 691#define __NR_process_vm_writev 271
692__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \ 692__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
693 compat_sys_process_vm_writev) 693 compat_sys_process_vm_writev)
694#define __NR_kcmp 272
695__SYSCALL(__NR_kcmp, sys_kcmp)
694 696
695#undef __NR_syscalls 697#undef __NR_syscalls
696#define __NR_syscalls 272 698#define __NR_syscalls 273
697 699
698/* 700/*
699 * All syscalls below here should go away really, 701 * All syscalls below here should go away really,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7e83370e6fd2..f3b99e1c1042 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -256,72 +256,78 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
256{ 256{
257} 257}
258 258
259int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) 259static inline int iommu_attach_group(struct iommu_domain *domain,
260 struct iommu_group *group)
260{ 261{
261 return -ENODEV; 262 return -ENODEV;
262} 263}
263 264
264void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) 265static inline void iommu_detach_group(struct iommu_domain *domain,
266 struct iommu_group *group)
265{ 267{
266} 268}
267 269
268struct iommu_group *iommu_group_alloc(void) 270static inline struct iommu_group *iommu_group_alloc(void)
269{ 271{
270 return ERR_PTR(-ENODEV); 272 return ERR_PTR(-ENODEV);
271} 273}
272 274
273void *iommu_group_get_iommudata(struct iommu_group *group) 275static inline void *iommu_group_get_iommudata(struct iommu_group *group)
274{ 276{
275 return NULL; 277 return NULL;
276} 278}
277 279
278void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, 280static inline void iommu_group_set_iommudata(struct iommu_group *group,
279 void (*release)(void *iommu_data)) 281 void *iommu_data,
282 void (*release)(void *iommu_data))
280{ 283{
281} 284}
282 285
283int iommu_group_set_name(struct iommu_group *group, const char *name) 286static inline int iommu_group_set_name(struct iommu_group *group,
287 const char *name)
284{ 288{
285 return -ENODEV; 289 return -ENODEV;
286} 290}
287 291
288int iommu_group_add_device(struct iommu_group *group, struct device *dev) 292static inline int iommu_group_add_device(struct iommu_group *group,
293 struct device *dev)
289{ 294{
290 return -ENODEV; 295 return -ENODEV;
291} 296}
292 297
293void iommu_group_remove_device(struct device *dev) 298static inline void iommu_group_remove_device(struct device *dev)
294{ 299{
295} 300}
296 301
297int iommu_group_for_each_dev(struct iommu_group *group, void *data, 302static inline int iommu_group_for_each_dev(struct iommu_group *group,
298 int (*fn)(struct device *, void *)) 303 void *data,
304 int (*fn)(struct device *, void *))
299{ 305{
300 return -ENODEV; 306 return -ENODEV;
301} 307}
302 308
303struct iommu_group *iommu_group_get(struct device *dev) 309static inline struct iommu_group *iommu_group_get(struct device *dev)
304{ 310{
305 return NULL; 311 return NULL;
306} 312}
307 313
308void iommu_group_put(struct iommu_group *group) 314static inline void iommu_group_put(struct iommu_group *group)
309{ 315{
310} 316}
311 317
312int iommu_group_register_notifier(struct iommu_group *group, 318static inline int iommu_group_register_notifier(struct iommu_group *group,
313 struct notifier_block *nb) 319 struct notifier_block *nb)
314{ 320{
315 return -ENODEV; 321 return -ENODEV;
316} 322}
317 323
318int iommu_group_unregister_notifier(struct iommu_group *group, 324static inline int iommu_group_unregister_notifier(struct iommu_group *group,
319 struct notifier_block *nb) 325 struct notifier_block *nb)
320{ 326{
321 return 0; 327 return 0;
322} 328}
323 329
324int iommu_group_id(struct iommu_group *group) 330static inline int iommu_group_id(struct iommu_group *group)
325{ 331{
326 return -ENODEV; 332 return -ENODEV;
327} 333}
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 61f0905bdc48..de201203bc7c 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -1,3 +1,15 @@
1/*
2 * include/linux/micrel_phy.h
3 *
4 * Micrel PHY IDs
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 */
12
1#ifndef _MICREL_PHY_H 13#ifndef _MICREL_PHY_H
2#define _MICREL_PHY_H 14#define _MICREL_PHY_H
3 15
@@ -5,10 +17,11 @@
5 17
6#define PHY_ID_KSZ9021 0x00221610 18#define PHY_ID_KSZ9021 0x00221610
7#define PHY_ID_KS8737 0x00221720 19#define PHY_ID_KS8737 0x00221720
8#define PHY_ID_KS8041 0x00221510 20#define PHY_ID_KSZ8021 0x00221555
9#define PHY_ID_KS8051 0x00221550 21#define PHY_ID_KSZ8041 0x00221510
22#define PHY_ID_KSZ8051 0x00221550
10/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */ 23/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
11#define PHY_ID_KS8001 0x0022161A 24#define PHY_ID_KSZ8001 0x0022161A
12 25
13/* struct phy_device dev_flags definitions */ 26/* struct phy_device dev_flags definitions */
14#define MICREL_PHY_50MHZ_CLK 0x00000001 27#define MICREL_PHY_50MHZ_CLK 0x00000001
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 9490a00529f4..c25cccaa555a 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -35,8 +35,10 @@ struct nvme_bar {
35 __u64 acq; /* Admin CQ Base Address */ 35 __u64 acq; /* Admin CQ Base Address */
36}; 36};
37 37
38#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
38#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) 39#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
39#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) 40#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
41#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
40 42
41enum { 43enum {
42 NVME_CC_ENABLE = 1 << 0, 44 NVME_CC_ENABLE = 1 << 0,
diff --git a/include/linux/security.h b/include/linux/security.h
index 3dea6a9d568f..d143b8e01954 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -118,6 +118,7 @@ void reset_security_ops(void);
118extern unsigned long mmap_min_addr; 118extern unsigned long mmap_min_addr;
119extern unsigned long dac_mmap_min_addr; 119extern unsigned long dac_mmap_min_addr;
120#else 120#else
121#define mmap_min_addr 0UL
121#define dac_mmap_min_addr 0UL 122#define dac_mmap_min_addr 0UL
122#endif 123#endif
123 124
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index c785554f9523..ebf3bac460b0 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -62,7 +62,7 @@ void fprop_global_destroy(struct fprop_global *p)
62 */ 62 */
63bool fprop_new_period(struct fprop_global *p, int periods) 63bool fprop_new_period(struct fprop_global *p, int periods)
64{ 64{
65 u64 events; 65 s64 events;
66 unsigned long flags; 66 unsigned long flags;
67 67
68 local_irq_save(flags); 68 local_irq_save(flags);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 57c4b9309015..141dbb695097 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1811,7 +1811,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1811 src_page = pte_page(pteval); 1811 src_page = pte_page(pteval);
1812 copy_user_highpage(page, src_page, address, vma); 1812 copy_user_highpage(page, src_page, address, vma);
1813 VM_BUG_ON(page_mapcount(src_page) != 1); 1813 VM_BUG_ON(page_mapcount(src_page) != 1);
1814 VM_BUG_ON(page_count(src_page) != 2);
1815 release_pte_page(src_page); 1814 release_pte_page(src_page);
1816 /* 1815 /*
1817 * ptl mostly unnecessary, but preempt has to 1816 * ptl mostly unnecessary, but preempt has to
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index e877af8bdd1e..469daabd90c7 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -642,7 +642,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
642 struct batadv_neigh_node *router = NULL; 642 struct batadv_neigh_node *router = NULL;
643 struct batadv_orig_node *orig_node_tmp; 643 struct batadv_orig_node *orig_node_tmp;
644 struct hlist_node *node; 644 struct hlist_node *node;
645 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; 645 int if_num;
646 uint8_t sum_orig, sum_neigh;
646 uint8_t *neigh_addr; 647 uint8_t *neigh_addr;
647 648
648 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 649 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -727,17 +728,17 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
727 if (router && (neigh_node->tq_avg == router->tq_avg)) { 728 if (router && (neigh_node->tq_avg == router->tq_avg)) {
728 orig_node_tmp = router->orig_node; 729 orig_node_tmp = router->orig_node;
729 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); 730 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
730 bcast_own_sum_orig = 731 if_num = router->if_incoming->if_num;
731 orig_node_tmp->bcast_own_sum[if_incoming->if_num]; 732 sum_orig = orig_node_tmp->bcast_own_sum[if_num];
732 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); 733 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
733 734
734 orig_node_tmp = neigh_node->orig_node; 735 orig_node_tmp = neigh_node->orig_node;
735 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); 736 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
736 bcast_own_sum_neigh = 737 if_num = neigh_node->if_incoming->if_num;
737 orig_node_tmp->bcast_own_sum[if_incoming->if_num]; 738 sum_neigh = orig_node_tmp->bcast_own_sum[if_num];
738 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); 739 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
739 740
740 if (bcast_own_sum_orig >= bcast_own_sum_neigh) 741 if (sum_orig >= sum_neigh)
741 goto update_tt; 742 goto update_tt;
742 } 743 }
743 744
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 109ea2aae96c..21c53577c8d6 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -100,18 +100,21 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
100{ 100{
101 struct batadv_priv *bat_priv = netdev_priv(dev); 101 struct batadv_priv *bat_priv = netdev_priv(dev);
102 struct sockaddr *addr = p; 102 struct sockaddr *addr = p;
103 uint8_t old_addr[ETH_ALEN];
103 104
104 if (!is_valid_ether_addr(addr->sa_data)) 105 if (!is_valid_ether_addr(addr->sa_data))
105 return -EADDRNOTAVAIL; 106 return -EADDRNOTAVAIL;
106 107
108 memcpy(old_addr, dev->dev_addr, ETH_ALEN);
109 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
110
107 /* only modify transtable if it has been initialized before */ 111 /* only modify transtable if it has been initialized before */
108 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) { 112 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
109 batadv_tt_local_remove(bat_priv, dev->dev_addr, 113 batadv_tt_local_remove(bat_priv, old_addr,
110 "mac address changed", false); 114 "mac address changed", false);
111 batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX); 115 batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
112 } 116 }
113 117
114 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
115 dev->addr_assign_type &= ~NET_ADDR_RANDOM; 118 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
116 return 0; 119 return 0;
117} 120}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d4de5db18d5a..0b997c8f9655 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -734,6 +734,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
734 734
735 cancel_work_sync(&hdev->le_scan); 735 cancel_work_sync(&hdev->le_scan);
736 736
737 cancel_delayed_work(&hdev->power_off);
738
737 hci_req_cancel(hdev, ENODEV); 739 hci_req_cancel(hdev, ENODEV);
738 hci_req_lock(hdev); 740 hci_req_lock(hdev);
739 741
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 4ea1710a4783..38c00f142203 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1008,7 +1008,7 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
1008 if (!conn) 1008 if (!conn)
1009 return; 1009 return;
1010 1010
1011 if (chan->mode == L2CAP_MODE_ERTM) { 1011 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1012 __clear_retrans_timer(chan); 1012 __clear_retrans_timer(chan);
1013 __clear_monitor_timer(chan); 1013 __clear_monitor_timer(chan);
1014 __clear_ack_timer(chan); 1014 __clear_ack_timer(chan);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ad6613d17ca6..eba022de3c20 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2875,6 +2875,22 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
2875 if (scan) 2875 if (scan)
2876 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 2876 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2877 2877
2878 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2879 u8 ssp = 1;
2880
2881 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
2882 }
2883
2884 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2885 struct hci_cp_write_le_host_supported cp;
2886
2887 cp.le = 1;
2888 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
2889
2890 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2891 sizeof(cp), &cp);
2892 }
2893
2878 update_class(hdev); 2894 update_class(hdev);
2879 update_name(hdev, hdev->dev_name); 2895 update_name(hdev, hdev->dev_name);
2880 update_eir(hdev); 2896 update_eir(hdev);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 24c5eea8c45b..159aa8bef9e7 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1073,16 +1073,13 @@ static int write_partial_msg_pages(struct ceph_connection *con)
1073 BUG_ON(kaddr == NULL); 1073 BUG_ON(kaddr == NULL);
1074 base = kaddr + con->out_msg_pos.page_pos + bio_offset; 1074 base = kaddr + con->out_msg_pos.page_pos + bio_offset;
1075 crc = crc32c(crc, base, len); 1075 crc = crc32c(crc, base, len);
1076 kunmap(page);
1076 msg->footer.data_crc = cpu_to_le32(crc); 1077 msg->footer.data_crc = cpu_to_le32(crc);
1077 con->out_msg_pos.did_page_crc = true; 1078 con->out_msg_pos.did_page_crc = true;
1078 } 1079 }
1079 ret = ceph_tcp_sendpage(con->sock, page, 1080 ret = ceph_tcp_sendpage(con->sock, page,
1080 con->out_msg_pos.page_pos + bio_offset, 1081 con->out_msg_pos.page_pos + bio_offset,
1081 len, 1); 1082 len, 1);
1082
1083 if (do_datacrc)
1084 kunmap(page);
1085
1086 if (ret <= 0) 1083 if (ret <= 0)
1087 goto out; 1084 goto out;
1088 1085
diff --git a/net/core/sock.c b/net/core/sock.c
index 305792076121..a6000fbad294 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -691,7 +691,8 @@ set_rcvbuf:
691 691
692 case SO_KEEPALIVE: 692 case SO_KEEPALIVE:
693#ifdef CONFIG_INET 693#ifdef CONFIG_INET
694 if (sk->sk_protocol == IPPROTO_TCP) 694 if (sk->sk_protocol == IPPROTO_TCP &&
695 sk->sk_type == SOCK_STREAM)
695 tcp_set_keepalive(sk, valbool); 696 tcp_set_keepalive(sk, valbool);
696#endif 697#endif
697 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 698 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e1e0a4e8fd34..c7527f6b9ad9 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -510,7 +510,10 @@ relookup:
510 secure_ipv6_id(daddr->addr.a6)); 510 secure_ipv6_id(daddr->addr.a6));
511 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; 511 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
512 p->rate_tokens = 0; 512 p->rate_tokens = 0;
513 p->rate_last = 0; 513 /* 60*HZ is arbitrary, but chosen enough high so that the first
514 * calculation of tokens is at its maximum.
515 */
516 p->rate_last = jiffies - 60*HZ;
514 INIT_LIST_HEAD(&p->gc_list); 517 INIT_LIST_HEAD(&p->gc_list);
515 518
516 /* Link the node. */ 519 /* Link the node. */
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index ff0f071969ea..d23c6571ba1c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -131,18 +131,20 @@ found:
131 * 0 - deliver 131 * 0 - deliver
132 * 1 - block 132 * 1 - block
133 */ 133 */
134static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb) 134static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
135{ 135{
136 int type; 136 struct icmphdr _hdr;
137 const struct icmphdr *hdr;
137 138
138 if (!pskb_may_pull(skb, sizeof(struct icmphdr))) 139 hdr = skb_header_pointer(skb, skb_transport_offset(skb),
140 sizeof(_hdr), &_hdr);
141 if (!hdr)
139 return 1; 142 return 1;
140 143
141 type = icmp_hdr(skb)->type; 144 if (hdr->type < 32) {
142 if (type < 32) {
143 __u32 data = raw_sk(sk)->filter.data; 145 __u32 data = raw_sk(sk)->filter.data;
144 146
145 return ((1 << type) & data) != 0; 147 return ((1U << hdr->type) & data) != 0;
146 } 148 }
147 149
148 /* Do not block unknown ICMP types */ 150 /* Do not block unknown ICMP types */
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 5b087c31d87b..0f9bdc5ee9f3 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -86,28 +86,30 @@ static int mip6_mh_len(int type)
86 86
87static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) 87static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
88{ 88{
89 struct ip6_mh *mh; 89 struct ip6_mh _hdr;
90 const struct ip6_mh *mh;
90 91
91 if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) || 92 mh = skb_header_pointer(skb, skb_transport_offset(skb),
92 !pskb_may_pull(skb, (skb_transport_offset(skb) + 93 sizeof(_hdr), &_hdr);
93 ((skb_transport_header(skb)[1] + 1) << 3)))) 94 if (!mh)
94 return -1; 95 return -1;
95 96
96 mh = (struct ip6_mh *)skb_transport_header(skb); 97 if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
98 return -1;
97 99
98 if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { 100 if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
99 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", 101 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
100 mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type)); 102 mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
101 mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) - 103 mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
102 skb_network_header(skb))); 104 skb_network_header_len(skb));
103 return -1; 105 return -1;
104 } 106 }
105 107
106 if (mh->ip6mh_proto != IPPROTO_NONE) { 108 if (mh->ip6mh_proto != IPPROTO_NONE) {
107 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", 109 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
108 mh->ip6mh_proto); 110 mh->ip6mh_proto);
109 mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) - 111 mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
110 skb_network_header(skb))); 112 skb_network_header_len(skb));
111 return -1; 113 return -1;
112 } 114 }
113 115
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ef0579d5bca6..4a5f78b50495 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -107,21 +107,20 @@ found:
107 * 0 - deliver 107 * 0 - deliver
108 * 1 - block 108 * 1 - block
109 */ 109 */
110static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb) 110static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
111{ 111{
112 struct icmp6hdr *icmph; 112 struct icmp6hdr *_hdr;
113 struct raw6_sock *rp = raw6_sk(sk); 113 const struct icmp6hdr *hdr;
114
115 if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
116 __u32 *data = &rp->filter.data[0];
117 int bit_nr;
118 114
119 icmph = (struct icmp6hdr *) skb->data; 115 hdr = skb_header_pointer(skb, skb_transport_offset(skb),
120 bit_nr = icmph->icmp6_type; 116 sizeof(_hdr), &_hdr);
117 if (hdr) {
118 const __u32 *data = &raw6_sk(sk)->filter.data[0];
119 unsigned int type = hdr->icmp6_type;
121 120
122 return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0; 121 return (data[type >> 5] & (1U << (type & 31))) != 0;
123 } 122 }
124 return 0; 123 return 1;
125} 124}
126 125
127#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 126#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index d71cd9229a47..6f936358d664 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -80,8 +80,8 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
80 80
81 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, 81 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
82 &l2tp_nl_family, 0, L2TP_CMD_NOOP); 82 &l2tp_nl_family, 0, L2TP_CMD_NOOP);
83 if (IS_ERR(hdr)) { 83 if (!hdr) {
84 ret = PTR_ERR(hdr); 84 ret = -EMSGSIZE;
85 goto err_out; 85 goto err_out;
86 } 86 }
87 87
@@ -250,8 +250,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
250 250
251 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, 251 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
252 L2TP_CMD_TUNNEL_GET); 252 L2TP_CMD_TUNNEL_GET);
253 if (IS_ERR(hdr)) 253 if (!hdr)
254 return PTR_ERR(hdr); 254 return -EMSGSIZE;
255 255
256 if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) || 256 if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
257 nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || 257 nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
@@ -617,8 +617,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
617 sk = tunnel->sock; 617 sk = tunnel->sock;
618 618
619 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); 619 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
620 if (IS_ERR(hdr)) 620 if (!hdr)
621 return PTR_ERR(hdr); 621 return -EMSGSIZE;
622 622
623 if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || 623 if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
624 nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) || 624 nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 5c22ce8ab309..a4c1e4528cac 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -117,11 +117,11 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
117 117
118 /* For SMP, we only want to use one set of state. */ 118 /* For SMP, we only want to use one set of state. */
119 r->master = priv; 119 r->master = priv;
120 /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
121 128. */
122 priv->prev = jiffies;
123 priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
120 if (r->cost == 0) { 124 if (r->cost == 0) {
121 /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
122 128. */
123 priv->prev = jiffies;
124 priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
125 r->credit_cap = priv->credit; /* Credits full. */ 125 r->credit_cap = priv->credit; /* Credits full. */
126 r->cost = user2credits(r->avg); 126 r->cost = user2credits(r->avg);
127 } 127 }
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2ded3c7fad06..72d170ca3406 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -350,6 +350,9 @@ static void reg_regdb_search(struct work_struct *work)
350 struct reg_regdb_search_request *request; 350 struct reg_regdb_search_request *request;
351 const struct ieee80211_regdomain *curdom, *regdom; 351 const struct ieee80211_regdomain *curdom, *regdom;
352 int i, r; 352 int i, r;
353 bool set_reg = false;
354
355 mutex_lock(&cfg80211_mutex);
353 356
354 mutex_lock(&reg_regdb_search_mutex); 357 mutex_lock(&reg_regdb_search_mutex);
355 while (!list_empty(&reg_regdb_search_list)) { 358 while (!list_empty(&reg_regdb_search_list)) {
@@ -365,9 +368,7 @@ static void reg_regdb_search(struct work_struct *work)
365 r = reg_copy_regd(&regdom, curdom); 368 r = reg_copy_regd(&regdom, curdom);
366 if (r) 369 if (r)
367 break; 370 break;
368 mutex_lock(&cfg80211_mutex); 371 set_reg = true;
369 set_regdom(regdom);
370 mutex_unlock(&cfg80211_mutex);
371 break; 372 break;
372 } 373 }
373 } 374 }
@@ -375,6 +376,11 @@ static void reg_regdb_search(struct work_struct *work)
375 kfree(request); 376 kfree(request);
376 } 377 }
377 mutex_unlock(&reg_regdb_search_mutex); 378 mutex_unlock(&reg_regdb_search_mutex);
379
380 if (set_reg)
381 set_regdom(regdom);
382
383 mutex_unlock(&cfg80211_mutex);
378} 384}
379 385
380static DECLARE_WORK(reg_regdb_work, reg_regdb_search); 386static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index d24810fc6af6..fd8fa9aa7c4e 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -200,7 +200,7 @@ EOF
200syscall_list() { 200syscall_list() {
201 grep '^[0-9]' "$1" | sort -n | ( 201 grep '^[0-9]' "$1" | sort -n | (
202 while read nr abi name entry ; do 202 while read nr abi name entry ; do
203 echo <<EOF 203 cat <<EOF
204#if !defined(__NR_${name}) && !defined(__IGNORE_${name}) 204#if !defined(__NR_${name}) && !defined(__IGNORE_${name})
205#warning syscall ${name} not implemented 205#warning syscall ${name} not implemented
206#endif 206#endif
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 3fd5b29dc933..a3acb7a85f6a 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -702,7 +702,7 @@ static bool wm2000_readable_reg(struct device *dev, unsigned int reg)
702} 702}
703 703
704static const struct regmap_config wm2000_regmap = { 704static const struct regmap_config wm2000_regmap = {
705 .reg_bits = 8, 705 .reg_bits = 16,
706 .val_bits = 8, 706 .val_bits = 8,
707 707
708 .max_register = WM2000_REG_IF_CTL, 708 .max_register = WM2000_REG_IF_CTL,
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index d6e2bb49c59c..060dccb9ec75 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -197,7 +197,13 @@ static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
197 /* no data provider, so send silence */ 197 /* no data provider, so send silence */
198 unsigned int offs = 0; 198 unsigned int offs = 0;
199 for (i = 0; i < ctx->packets; ++i) { 199 for (i = 0; i < ctx->packets; ++i) {
200 int counts = ctx->packet_size[i]; 200 int counts;
201
202 if (ctx->packet_size[i])
203 counts = ctx->packet_size[i];
204 else
205 counts = snd_usb_endpoint_next_packet_size(ep);
206
201 urb->iso_frame_desc[i].offset = offs * ep->stride; 207 urb->iso_frame_desc[i].offset = offs * ep->stride;
202 urb->iso_frame_desc[i].length = counts * ep->stride; 208 urb->iso_frame_desc[i].length = counts * ep->stride;
203 offs += counts; 209 offs += counts;