aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-08-09 20:46:39 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-09 20:46:39 -0400
commit913847586290d5de22659e2a6195d91ff24d5aa6 (patch)
tree98bd9bd7074dd2002fa13c680fb61efadeeabf6e
parentc23190c0bf1236e1eb5521a8b10d0102fbc1338c (diff)
parent27111a23d01c1dba3180c998629004ab4c9ac985 (diff)
Merge branch 'linux-3.17' of git://anongit.freedesktop.org/git/nouveau/linux-2.6
Pull nouveau drm updates from Ben Skeggs: "Apologies for not getting this done in time for Dave's drm-next merge window. As he mentioned, a pre-existing bug reared its head a lot more obviously after this lot of changes. It took quite a bit of time to track it down. In any case, Dave suggested I try my luck by sending directly to you this time. Overview: - more code for Tegra GK20A from NVIDIA - probing, reclockig - better fix for Kepler GPUs that have the graphics engine powered off on startup, method courtesy of info provided by NVIDIA - unhardcoding of a bunch of graphics engine setup on Fermi/Kepler/Maxwell, will hopefully solve some issues people have noticed on higher-end models - support for "Zero Bandwidth Clear" on Fermi/Kepler/Maxwell, needs userspace support in general, but some lucky apps will benefit automagically - reviewed/exposed the full object APIs to userspace (finally), gives it access to perfctrs, ZBC controls, various events. More to come in the future. - various other fixes" Acked-by: Dave Airlie <airlied@redhat.com> * 'linux-3.17' of git://anongit.freedesktop.org/git/nouveau/linux-2.6: (87 commits) drm/nouveau: expose the full object/event interfaces to userspace drm/nouveau: fix headless mode drm/nouveau: hide sysfs pstate file behind an option again drm/nv50/disp: shhh compiler drm/gf100-/gr: implement the proper SetShaderExceptions method drm/gf100-/gr: remove some broken ltc bashing, for now drm/gf100-/gr: unhardcode attribute cb config drm/gf100-/gr: fetch tpcs-per-ppc info on startup drm/gf100-/gr: unhardcode pagepool config drm/gf100-/gr: unhardcode bundle cb config drm/gf100-/gr: improve initial context patch list helpers drm/gf100-/gr: add support for zero bandwidth clear drm/nouveau/ltc: add zbc drivers drm/nouveau/ltc: s/ltcg/ltc/ + cleanup drm/nouveau: use ram info from nvif_device drm/nouveau/disp: implement nvif event sources for vblank/connector notifiers drm/nouveau/disp: allow user direct access to channel control registers 
drm/nouveau/disp: audit and version display classes drm/nouveau/disp: audit and version SCANOUTPOS method drm/nv50-/disp: audit and version PIOR_PWR method ...
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig12
-rw-r--r--drivers/gpu/drm/nouveau/Makefile25
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c162
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c176
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/core/ioctl.c531
-rw-r--r--drivers/gpu/drm/nouveau/core/core/notify.c167
-rw-r--r--drivers/gpu/drm/nouveau/core/core/object.c154
-rw-r--r--drivers/gpu/drm/nouveau/core/core/parent.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/acpi.c59
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/acpi.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c396
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/ctrl.c153
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/gm100.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c49
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/base.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/conn.c63
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/conn.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c85
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/gm107.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c137
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c477
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h131
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c380
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c100
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c157
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c160
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c210
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c170
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c157
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c113
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c58
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c58
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c121
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h65
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c81
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c85
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c117
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gm107.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv108.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv25.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv30.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv34.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv35.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c265
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve4.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/base.c128
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h470
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/event.h51
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/handle.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/ioctl.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/notify.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/perfmon.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/class.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/event.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/unpack.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bar.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltc.h35
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/pwr.h43
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/base.c197
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c665
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/base.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c)165
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c58
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c)121
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/priv.h38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/base.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h658
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h222
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h222
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h222
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c42
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c54
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c114
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.h84
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c120
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c27
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c246
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c57
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c143
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c246
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c60
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c289
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c307
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h70
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c137
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c37
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c84
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c58
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c136
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c182
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.h49
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c88
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c384
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c59
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h3
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c920
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c31
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/class.h558
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c129
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c78
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.h62
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvif/event.h62
-rw-r--r--drivers/gpu/drm/nouveau/nvif/ioctl.h128
-rw-r--r--drivers/gpu/drm/nouveau/nvif/list.h353
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.c237
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c302
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.h75
l---------drivers/gpu/drm/nouveau/nvif/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/unpack.h24
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c13
-rw-r--r--include/drm/ttm/ttm_bo_driver.h2
-rw-r--r--include/uapi/drm/nouveau_drm.h11
264 files changed, 12153 insertions, 5795 deletions
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 637c29a33127..40afc69a3778 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,5 +1,5 @@
1config DRM_NOUVEAU 1config DRM_NOUVEAU
2 tristate "Nouveau (nVidia) cards" 2 tristate "Nouveau (NVIDIA) cards"
3 depends on DRM && PCI 3 depends on DRM && PCI
4 select FW_LOADER 4 select FW_LOADER
5 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
@@ -23,7 +23,15 @@ config DRM_NOUVEAU
23 select THERMAL if ACPI && X86 23 select THERMAL if ACPI && X86
24 select ACPI_VIDEO if ACPI && X86 24 select ACPI_VIDEO if ACPI && X86
25 help 25 help
26 Choose this option for open-source nVidia support. 26 Choose this option for open-source NVIDIA support.
27
28config NOUVEAU_PLATFORM_DRIVER
29 tristate "Nouveau (NVIDIA) SoC GPUs"
30 depends on DRM_NOUVEAU && ARCH_TEGRA
31 default y
32 help
33 Support for Nouveau platform driver, used for SoC GPUs as found
34 on NVIDIA Tegra K1.
27 35
28config NOUVEAU_DEBUG 36config NOUVEAU_DEBUG
29 int "Maximum debug level" 37 int "Maximum debug level"
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 8b307e143632..f5d7f7ce4bc6 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -14,8 +14,10 @@ nouveau-y += core/core/enum.o
14nouveau-y += core/core/event.o 14nouveau-y += core/core/event.o
15nouveau-y += core/core/gpuobj.o 15nouveau-y += core/core/gpuobj.o
16nouveau-y += core/core/handle.o 16nouveau-y += core/core/handle.o
17nouveau-y += core/core/ioctl.o
17nouveau-y += core/core/mm.o 18nouveau-y += core/core/mm.o
18nouveau-y += core/core/namedb.o 19nouveau-y += core/core/namedb.o
20nouveau-y += core/core/notify.o
19nouveau-y += core/core/object.o 21nouveau-y += core/core/object.o
20nouveau-y += core/core/option.o 22nouveau-y += core/core/option.o
21nouveau-y += core/core/parent.o 23nouveau-y += core/core/parent.o
@@ -26,6 +28,7 @@ nouveau-y += core/core/subdev.o
26nouveau-y += core/subdev/bar/base.o 28nouveau-y += core/subdev/bar/base.o
27nouveau-y += core/subdev/bar/nv50.o 29nouveau-y += core/subdev/bar/nv50.o
28nouveau-y += core/subdev/bar/nvc0.o 30nouveau-y += core/subdev/bar/nvc0.o
31nouveau-y += core/subdev/bar/gk20a.o
29nouveau-y += core/subdev/bios/base.o 32nouveau-y += core/subdev/bios/base.o
30nouveau-y += core/subdev/bios/bit.o 33nouveau-y += core/subdev/bios/bit.o
31nouveau-y += core/subdev/bios/boost.o 34nouveau-y += core/subdev/bios/boost.o
@@ -64,6 +67,7 @@ nouveau-y += core/subdev/clock/nva3.o
64nouveau-y += core/subdev/clock/nvaa.o 67nouveau-y += core/subdev/clock/nvaa.o
65nouveau-y += core/subdev/clock/nvc0.o 68nouveau-y += core/subdev/clock/nvc0.o
66nouveau-y += core/subdev/clock/nve0.o 69nouveau-y += core/subdev/clock/nve0.o
70nouveau-y += core/subdev/clock/gk20a.o
67nouveau-y += core/subdev/clock/pllnv04.o 71nouveau-y += core/subdev/clock/pllnv04.o
68nouveau-y += core/subdev/clock/pllnva3.o 72nouveau-y += core/subdev/clock/pllnva3.o
69nouveau-y += core/subdev/devinit/base.o 73nouveau-y += core/subdev/devinit/base.o
@@ -149,8 +153,10 @@ nouveau-y += core/subdev/instmem/base.o
149nouveau-y += core/subdev/instmem/nv04.o 153nouveau-y += core/subdev/instmem/nv04.o
150nouveau-y += core/subdev/instmem/nv40.o 154nouveau-y += core/subdev/instmem/nv40.o
151nouveau-y += core/subdev/instmem/nv50.o 155nouveau-y += core/subdev/instmem/nv50.o
152nouveau-y += core/subdev/ltcg/gf100.o 156nouveau-y += core/subdev/ltc/base.o
153nouveau-y += core/subdev/ltcg/gm107.o 157nouveau-y += core/subdev/ltc/gf100.o
158nouveau-y += core/subdev/ltc/gk104.o
159nouveau-y += core/subdev/ltc/gm107.o
154nouveau-y += core/subdev/mc/base.o 160nouveau-y += core/subdev/mc/base.o
155nouveau-y += core/subdev/mc/nv04.o 161nouveau-y += core/subdev/mc/nv04.o
156nouveau-y += core/subdev/mc/nv40.o 162nouveau-y += core/subdev/mc/nv40.o
@@ -161,6 +167,7 @@ nouveau-y += core/subdev/mc/nv94.o
161nouveau-y += core/subdev/mc/nv98.o 167nouveau-y += core/subdev/mc/nv98.o
162nouveau-y += core/subdev/mc/nvc0.o 168nouveau-y += core/subdev/mc/nvc0.o
163nouveau-y += core/subdev/mc/nvc3.o 169nouveau-y += core/subdev/mc/nvc3.o
170nouveau-y += core/subdev/mc/gk20a.o
164nouveau-y += core/subdev/mxm/base.o 171nouveau-y += core/subdev/mxm/base.o
165nouveau-y += core/subdev/mxm/mxms.o 172nouveau-y += core/subdev/mxm/mxms.o
166nouveau-y += core/subdev/mxm/nv50.o 173nouveau-y += core/subdev/mxm/nv50.o
@@ -169,6 +176,7 @@ nouveau-y += core/subdev/pwr/memx.o
169nouveau-y += core/subdev/pwr/nva3.o 176nouveau-y += core/subdev/pwr/nva3.o
170nouveau-y += core/subdev/pwr/nvc0.o 177nouveau-y += core/subdev/pwr/nvc0.o
171nouveau-y += core/subdev/pwr/nvd0.o 178nouveau-y += core/subdev/pwr/nvd0.o
179nouveau-y += core/subdev/pwr/gk104.o
172nouveau-y += core/subdev/pwr/nv108.o 180nouveau-y += core/subdev/pwr/nv108.o
173nouveau-y += core/subdev/therm/base.o 181nouveau-y += core/subdev/therm/base.o
174nouveau-y += core/subdev/therm/fan.o 182nouveau-y += core/subdev/therm/fan.o
@@ -211,6 +219,7 @@ nouveau-y += core/engine/copy/nvc0.o
211nouveau-y += core/engine/copy/nve0.o 219nouveau-y += core/engine/copy/nve0.o
212nouveau-y += core/engine/crypt/nv84.o 220nouveau-y += core/engine/crypt/nv84.o
213nouveau-y += core/engine/crypt/nv98.o 221nouveau-y += core/engine/crypt/nv98.o
222nouveau-y += core/engine/device/acpi.o
214nouveau-y += core/engine/device/base.o 223nouveau-y += core/engine/device/base.o
215nouveau-y += core/engine/device/ctrl.o 224nouveau-y += core/engine/device/ctrl.o
216nouveau-y += core/engine/device/nv04.o 225nouveau-y += core/engine/device/nv04.o
@@ -270,6 +279,7 @@ nouveau-y += core/engine/graph/ctxnvd9.o
270nouveau-y += core/engine/graph/ctxnve4.o 279nouveau-y += core/engine/graph/ctxnve4.o
271nouveau-y += core/engine/graph/ctxgk20a.o 280nouveau-y += core/engine/graph/ctxgk20a.o
272nouveau-y += core/engine/graph/ctxnvf0.o 281nouveau-y += core/engine/graph/ctxnvf0.o
282nouveau-y += core/engine/graph/ctxgk110b.o
273nouveau-y += core/engine/graph/ctxnv108.o 283nouveau-y += core/engine/graph/ctxnv108.o
274nouveau-y += core/engine/graph/ctxgm107.o 284nouveau-y += core/engine/graph/ctxgm107.o
275nouveau-y += core/engine/graph/nv04.o 285nouveau-y += core/engine/graph/nv04.o
@@ -291,6 +301,7 @@ nouveau-y += core/engine/graph/nvd9.o
291nouveau-y += core/engine/graph/nve4.o 301nouveau-y += core/engine/graph/nve4.o
292nouveau-y += core/engine/graph/gk20a.o 302nouveau-y += core/engine/graph/gk20a.o
293nouveau-y += core/engine/graph/nvf0.o 303nouveau-y += core/engine/graph/nvf0.o
304nouveau-y += core/engine/graph/gk110b.o
294nouveau-y += core/engine/graph/nv108.o 305nouveau-y += core/engine/graph/nv108.o
295nouveau-y += core/engine/graph/gm107.o 306nouveau-y += core/engine/graph/gm107.o
296nouveau-y += core/engine/mpeg/nv31.o 307nouveau-y += core/engine/mpeg/nv31.o
@@ -318,11 +329,18 @@ nouveau-y += core/engine/vp/nv98.o
318nouveau-y += core/engine/vp/nvc0.o 329nouveau-y += core/engine/vp/nvc0.o
319nouveau-y += core/engine/vp/nve0.o 330nouveau-y += core/engine/vp/nve0.o
320 331
332# nvif
333nouveau-y += nvif/object.o
334nouveau-y += nvif/client.o
335nouveau-y += nvif/device.o
336nouveau-y += nvif/notify.o
337
321# drm/core 338# drm/core
322nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o 339nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
323nouveau-y += nouveau_vga.o nouveau_agp.o 340nouveau-y += nouveau_vga.o nouveau_agp.o
324nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o 341nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
325nouveau-y += nouveau_prime.o nouveau_abi16.o 342nouveau-y += nouveau_prime.o nouveau_abi16.o
343nouveau-y += nouveau_nvif.o nouveau_usif.o
326nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o 344nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
327nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o 345nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o
328 346
@@ -349,3 +367,6 @@ nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
349nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o 367nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
350 368
351obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o 369obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
370
371# platform driver
372obj-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 9079c0ac58e6..10598dede9e9 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -26,13 +26,167 @@
26#include <core/client.h> 26#include <core/client.h>
27#include <core/handle.h> 27#include <core/handle.h>
28#include <core/option.h> 28#include <core/option.h>
29#include <nvif/unpack.h>
30#include <nvif/class.h>
31
32#include <nvif/unpack.h>
33#include <nvif/event.h>
29 34
30#include <engine/device.h> 35#include <engine/device.h>
31 36
37struct nvkm_client_notify {
38 struct nouveau_client *client;
39 struct nvkm_notify n;
40 u8 version;
41 u8 size;
42 union {
43 struct nvif_notify_rep_v0 v0;
44 } rep;
45};
46
47static int
48nvkm_client_notify(struct nvkm_notify *n)
49{
50 struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n);
51 struct nouveau_client *client = notify->client;
52 return client->ntfy(&notify->rep, notify->size, n->data, n->size);
53}
54
55int
56nvkm_client_notify_put(struct nouveau_client *client, int index)
57{
58 if (index < ARRAY_SIZE(client->notify)) {
59 if (client->notify[index]) {
60 nvkm_notify_put(&client->notify[index]->n);
61 return 0;
62 }
63 }
64 return -ENOENT;
65}
66
67int
68nvkm_client_notify_get(struct nouveau_client *client, int index)
69{
70 if (index < ARRAY_SIZE(client->notify)) {
71 if (client->notify[index]) {
72 nvkm_notify_get(&client->notify[index]->n);
73 return 0;
74 }
75 }
76 return -ENOENT;
77}
78
79int
80nvkm_client_notify_del(struct nouveau_client *client, int index)
81{
82 if (index < ARRAY_SIZE(client->notify)) {
83 if (client->notify[index]) {
84 nvkm_notify_fini(&client->notify[index]->n);
85 kfree(client->notify[index]);
86 client->notify[index] = NULL;
87 return 0;
88 }
89 }
90 return -ENOENT;
91}
92
93int
94nvkm_client_notify_new(struct nouveau_client *client,
95 struct nvkm_event *event, void *data, u32 size)
96{
97 struct nvkm_client_notify *notify;
98 union {
99 struct nvif_notify_req_v0 v0;
100 } *req = data;
101 u8 index, reply;
102 int ret;
103
104 for (index = 0; index < ARRAY_SIZE(client->notify); index++) {
105 if (!client->notify[index])
106 break;
107 }
108
109 if (index == ARRAY_SIZE(client->notify))
110 return -ENOSPC;
111
112 notify = kzalloc(sizeof(*notify), GFP_KERNEL);
113 if (!notify)
114 return -ENOMEM;
115
116 nv_ioctl(client, "notify new size %d\n", size);
117 if (nvif_unpack(req->v0, 0, 0, true)) {
118 nv_ioctl(client, "notify new vers %d reply %d route %02x "
119 "token %llx\n", req->v0.version,
120 req->v0.reply, req->v0.route, req->v0.token);
121 notify->version = req->v0.version;
122 notify->size = sizeof(notify->rep.v0);
123 notify->rep.v0.version = req->v0.version;
124 notify->rep.v0.route = req->v0.route;
125 notify->rep.v0.token = req->v0.token;
126 reply = req->v0.reply;
127 }
128
129 if (ret == 0) {
130 ret = nvkm_notify_init(event, nvkm_client_notify, false,
131 data, size, reply, &notify->n);
132 if (ret == 0) {
133 client->notify[index] = notify;
134 notify->client = client;
135 return 0;
136 }
137 }
138
139 kfree(notify);
140 return 0;
141}
142
143static int
144nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size)
145{
146 union {
147 struct nv_client_devlist_v0 v0;
148 } *args = data;
149 int ret;
150
151 nv_ioctl(object, "client devlist size %d\n", size);
152 if (nvif_unpack(args->v0, 0, 0, true)) {
153 nv_ioctl(object, "client devlist vers %d count %d\n",
154 args->v0.version, args->v0.count);
155 if (size == sizeof(args->v0.device[0]) * args->v0.count) {
156 ret = nouveau_device_list(args->v0.device,
157 args->v0.count);
158 if (ret >= 0) {
159 args->v0.count = ret;
160 ret = 0;
161 }
162 } else {
163 ret = -EINVAL;
164 }
165 }
166
167 return ret;
168}
169
170static int
171nouveau_client_mthd(struct nouveau_object *object, u32 mthd,
172 void *data, u32 size)
173{
174 switch (mthd) {
175 case NV_CLIENT_DEVLIST:
176 return nouveau_client_devlist(object, data, size);
177 default:
178 break;
179 }
180 return -EINVAL;
181}
182
32static void 183static void
33nouveau_client_dtor(struct nouveau_object *object) 184nouveau_client_dtor(struct nouveau_object *object)
34{ 185{
35 struct nouveau_client *client = (void *)object; 186 struct nouveau_client *client = (void *)object;
187 int i;
188 for (i = 0; i < ARRAY_SIZE(client->notify); i++)
189 nvkm_client_notify_del(client, i);
36 nouveau_object_ref(NULL, &client->device); 190 nouveau_object_ref(NULL, &client->device);
37 nouveau_handle_destroy(client->root); 191 nouveau_handle_destroy(client->root);
38 nouveau_namedb_destroy(&client->base); 192 nouveau_namedb_destroy(&client->base);
@@ -42,6 +196,7 @@ static struct nouveau_oclass
42nouveau_client_oclass = { 196nouveau_client_oclass = {
43 .ofuncs = &(struct nouveau_ofuncs) { 197 .ofuncs = &(struct nouveau_ofuncs) {
44 .dtor = nouveau_client_dtor, 198 .dtor = nouveau_client_dtor,
199 .mthd = nouveau_client_mthd,
45 }, 200 },
46}; 201};
47 202
@@ -93,9 +248,12 @@ int
93nouveau_client_fini(struct nouveau_client *client, bool suspend) 248nouveau_client_fini(struct nouveau_client *client, bool suspend)
94{ 249{
95 const char *name[2] = { "fini", "suspend" }; 250 const char *name[2] = { "fini", "suspend" };
96 int ret; 251 int ret, i;
97
98 nv_debug(client, "%s running\n", name[suspend]); 252 nv_debug(client, "%s running\n", name[suspend]);
253 nv_debug(client, "%s notify\n", name[suspend]);
254 for (i = 0; i < ARRAY_SIZE(client->notify); i++)
255 nvkm_client_notify_put(client, i);
256 nv_debug(client, "%s object\n", name[suspend]);
99 ret = nouveau_handle_fini(client->root, suspend); 257 ret = nouveau_handle_fini(client->root, suspend);
100 nv_debug(client, "%s completed with %d\n", name[suspend], ret); 258 nv_debug(client, "%s completed with %d\n", name[suspend], ret);
101 return ret; 259 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index ae81d3b5d8b7..0540a48c5678 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2013 Red Hat Inc. 2 * Copyright 2013-2014 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -24,173 +24,77 @@
24#include <core/event.h> 24#include <core/event.h>
25 25
26void 26void
27nouveau_event_put(struct nouveau_eventh *handler) 27nvkm_event_put(struct nvkm_event *event, u32 types, int index)
28{ 28{
29 struct nouveau_event *event = handler->event; 29 BUG_ON(!spin_is_locked(&event->refs_lock));
30 unsigned long flags; 30 while (types) {
31 u32 m, t; 31 int type = __ffs(types); types &= ~(1 << type);
32 32 if (--event->refs[index * event->types_nr + type] == 0) {
33 if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) 33 if (event->func->fini)
34 return; 34 event->func->fini(event, 1 << type, index);
35
36 spin_lock_irqsave(&event->refs_lock, flags);
37 for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
38 if (!--event->refs[handler->index * event->types_nr + t]) {
39 if (event->disable)
40 event->disable(event, 1 << t, handler->index);
41 } 35 }
42
43 } 36 }
44 spin_unlock_irqrestore(&event->refs_lock, flags);
45} 37}
46 38
47void 39void
48nouveau_event_get(struct nouveau_eventh *handler) 40nvkm_event_get(struct nvkm_event *event, u32 types, int index)
49{ 41{
50 struct nouveau_event *event = handler->event; 42 BUG_ON(!spin_is_locked(&event->refs_lock));
51 unsigned long flags; 43 while (types) {
52 u32 m, t; 44 int type = __ffs(types); types &= ~(1 << type);
53 45 if (++event->refs[index * event->types_nr + type] == 1) {
54 if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) 46 if (event->func->init)
55 return; 47 event->func->init(event, 1 << type, index);
56
57 spin_lock_irqsave(&event->refs_lock, flags);
58 for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
59 if (!event->refs[handler->index * event->types_nr + t]++) {
60 if (event->enable)
61 event->enable(event, 1 << t, handler->index);
62 } 48 }
63
64 } 49 }
65 spin_unlock_irqrestore(&event->refs_lock, flags);
66}
67
68static void
69nouveau_event_fini(struct nouveau_eventh *handler)
70{
71 struct nouveau_event *event = handler->event;
72 unsigned long flags;
73 nouveau_event_put(handler);
74 spin_lock_irqsave(&event->list_lock, flags);
75 list_del(&handler->head);
76 spin_unlock_irqrestore(&event->list_lock, flags);
77}
78
79static int
80nouveau_event_init(struct nouveau_event *event, u32 types, int index,
81 int (*func)(void *, u32, int), void *priv,
82 struct nouveau_eventh *handler)
83{
84 unsigned long flags;
85
86 if (types & ~((1 << event->types_nr) - 1))
87 return -EINVAL;
88 if (index >= event->index_nr)
89 return -EINVAL;
90
91 handler->event = event;
92 handler->flags = 0;
93 handler->types = types;
94 handler->index = index;
95 handler->func = func;
96 handler->priv = priv;
97
98 spin_lock_irqsave(&event->list_lock, flags);
99 list_add_tail(&handler->head, &event->list[index]);
100 spin_unlock_irqrestore(&event->list_lock, flags);
101 return 0;
102}
103
104int
105nouveau_event_new(struct nouveau_event *event, u32 types, int index,
106 int (*func)(void *, u32, int), void *priv,
107 struct nouveau_eventh **phandler)
108{
109 struct nouveau_eventh *handler;
110 int ret = -ENOMEM;
111
112 if (event->check) {
113 ret = event->check(event, types, index);
114 if (ret)
115 return ret;
116 }
117
118 handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
119 if (handler) {
120 ret = nouveau_event_init(event, types, index, func, priv, handler);
121 if (ret)
122 kfree(handler);
123 }
124
125 return ret;
126}
127
128void
129nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
130{
131 BUG_ON(handler != NULL);
132 if (*ref) {
133 nouveau_event_fini(*ref);
134 kfree(*ref);
135 }
136 *ref = handler;
137} 50}
138 51
139void 52void
140nouveau_event_trigger(struct nouveau_event *event, u32 types, int index) 53nvkm_event_send(struct nvkm_event *event, u32 types, int index,
54 void *data, u32 size)
141{ 55{
142 struct nouveau_eventh *handler; 56 struct nvkm_notify *notify;
143 unsigned long flags; 57 unsigned long flags;
144 58
145 if (WARN_ON(index >= event->index_nr)) 59 if (!event->refs || WARN_ON(index >= event->index_nr))
146 return; 60 return;
147 61
148 spin_lock_irqsave(&event->list_lock, flags); 62 spin_lock_irqsave(&event->list_lock, flags);
149 list_for_each_entry(handler, &event->list[index], head) { 63 list_for_each_entry(notify, &event->list, head) {
150 if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags)) 64 if (notify->index == index && (notify->types & types)) {
151 continue; 65 if (event->func->send) {
152 if (!(handler->types & types)) 66 event->func->send(data, size, notify);
153 continue; 67 continue;
154 if (handler->func(handler->priv, handler->types & types, index) 68 }
155 != NVKM_EVENT_DROP) 69 nvkm_notify_send(notify, data, size);
156 continue; 70 }
157 nouveau_event_put(handler);
158 } 71 }
159 spin_unlock_irqrestore(&event->list_lock, flags); 72 spin_unlock_irqrestore(&event->list_lock, flags);
160} 73}
161 74
162void 75void
163nouveau_event_destroy(struct nouveau_event **pevent) 76nvkm_event_fini(struct nvkm_event *event)
164{ 77{
165 struct nouveau_event *event = *pevent; 78 if (event->refs) {
166 if (event) { 79 kfree(event->refs);
167 kfree(event); 80 event->refs = NULL;
168 *pevent = NULL;
169 } 81 }
170} 82}
171 83
172int 84int
173nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent) 85nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr,
86 struct nvkm_event *event)
174{ 87{
175 struct nouveau_event *event; 88 event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr,
176 int i; 89 GFP_KERNEL);
177 90 if (!event->refs)
178 event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) *
179 sizeof(event->refs[0]), GFP_KERNEL);
180 if (!event)
181 return -ENOMEM;
182
183 event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL);
184 if (!event->list) {
185 kfree(event);
186 return -ENOMEM; 91 return -ENOMEM;
187 }
188 92
189 spin_lock_init(&event->list_lock); 93 event->func = func;
190 spin_lock_init(&event->refs_lock);
191 for (i = 0; i < index_nr; i++)
192 INIT_LIST_HEAD(&event->list[i]);
193 event->types_nr = types_nr; 94 event->types_nr = types_nr;
194 event->index_nr = index_nr; 95 event->index_nr = index_nr;
96 spin_lock_init(&event->refs_lock);
97 spin_lock_init(&event->list_lock);
98 INIT_LIST_HEAD(&event->list);
195 return 0; 99 return 0;
196} 100}
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index 264c2b338ac3..a490b805d7e3 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -146,9 +146,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
146 } 146 }
147 147
148 hprintk(handle, TRACE, "created\n"); 148 hprintk(handle, TRACE, "created\n");
149
150 *phandle = handle; 149 *phandle = handle;
151
152 return 0; 150 return 0;
153} 151}
154 152
@@ -224,3 +222,116 @@ nouveau_handle_put(struct nouveau_handle *handle)
224 if (handle) 222 if (handle)
225 nouveau_namedb_put(handle); 223 nouveau_namedb_put(handle);
226} 224}
225
226int
227nouveau_handle_new(struct nouveau_object *client, u32 _parent, u32 _handle,
228 u16 _oclass, void *data, u32 size,
229 struct nouveau_object **pobject)
230{
231 struct nouveau_object *parent = NULL;
232 struct nouveau_object *engctx = NULL;
233 struct nouveau_object *object = NULL;
234 struct nouveau_object *engine;
235 struct nouveau_oclass *oclass;
236 struct nouveau_handle *handle;
237 int ret;
238
239 /* lookup parent object and ensure it *is* a parent */
240 parent = nouveau_handle_ref(client, _parent);
241 if (!parent) {
242 nv_error(client, "parent 0x%08x not found\n", _parent);
243 return -ENOENT;
244 }
245
246 if (!nv_iclass(parent, NV_PARENT_CLASS)) {
247 nv_error(parent, "cannot have children\n");
248 ret = -EINVAL;
249 goto fail_class;
250 }
251
252 /* check that parent supports the requested subclass */
253 ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
254 if (ret) {
255 nv_debug(parent, "illegal class 0x%04x\n", _oclass);
256 goto fail_class;
257 }
258
259 /* make sure engine init has been completed *before* any objects
260 * it controls are created - the constructors may depend on
261 * state calculated at init (ie. default context construction)
262 */
263 if (engine) {
264 ret = nouveau_object_inc(engine);
265 if (ret)
266 goto fail_class;
267 }
268
269 /* if engine requires it, create a context object to insert
270 * between the parent and its children (eg. PGRAPH context)
271 */
272 if (engine && nv_engine(engine)->cclass) {
273 ret = nouveau_object_ctor(parent, engine,
274 nv_engine(engine)->cclass,
275 data, size, &engctx);
276 if (ret)
277 goto fail_engctx;
278 } else {
279 nouveau_object_ref(parent, &engctx);
280 }
281
282 /* finally, create new object and bind it to its handle */
283 ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
284 *pobject = object;
285 if (ret)
286 goto fail_ctor;
287
288 ret = nouveau_object_inc(object);
289 if (ret)
290 goto fail_init;
291
292 ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
293 if (ret)
294 goto fail_handle;
295
296 ret = nouveau_handle_init(handle);
297 if (ret)
298 nouveau_handle_destroy(handle);
299
300fail_handle:
301 nouveau_object_dec(object, false);
302fail_init:
303 nouveau_object_ref(NULL, &object);
304fail_ctor:
305 nouveau_object_ref(NULL, &engctx);
306fail_engctx:
307 if (engine)
308 nouveau_object_dec(engine, false);
309fail_class:
310 nouveau_object_ref(NULL, &parent);
311 return ret;
312}
313
314int
315nouveau_handle_del(struct nouveau_object *client, u32 _parent, u32 _handle)
316{
317 struct nouveau_object *parent = NULL;
318 struct nouveau_object *namedb = NULL;
319 struct nouveau_handle *handle = NULL;
320
321 parent = nouveau_handle_ref(client, _parent);
322 if (!parent)
323 return -ENOENT;
324
325 namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
326 if (namedb) {
327 handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
328 if (handle) {
329 nouveau_namedb_put(handle);
330 nouveau_handle_fini(handle, false);
331 nouveau_handle_destroy(handle);
332 }
333 }
334
335 nouveau_object_ref(NULL, &parent);
336 return handle ? 0 : -EINVAL;
337}
diff --git a/drivers/gpu/drm/nouveau/core/core/ioctl.c b/drivers/gpu/drm/nouveau/core/core/ioctl.c
new file mode 100644
index 000000000000..f7e19bfb489c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/ioctl.c
@@ -0,0 +1,531 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <core/object.h>
26#include <core/parent.h>
27#include <core/handle.h>
28#include <core/namedb.h>
29#include <core/client.h>
30#include <core/device.h>
31#include <core/ioctl.h>
32#include <core/event.h>
33
34#include <nvif/unpack.h>
35#include <nvif/ioctl.h>
36
37static int
38nvkm_ioctl_nop(struct nouveau_handle *handle, void *data, u32 size)
39{
40 struct nouveau_object *object = handle->object;
41 union {
42 struct nvif_ioctl_nop none;
43 } *args = data;
44 int ret;
45
46 nv_ioctl(object, "nop size %d\n", size);
47 if (nvif_unvers(args->none)) {
48 nv_ioctl(object, "nop\n");
49 }
50
51 return ret;
52}
53
54static int
55nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size)
56{
57 struct nouveau_object *object = handle->object;
58 union {
59 struct nvif_ioctl_sclass_v0 v0;
60 } *args = data;
61 int ret;
62
63 if (!nv_iclass(object, NV_PARENT_CLASS)) {
64 nv_debug(object, "cannot have children (sclass)\n");
65 return -ENODEV;
66 }
67
68 nv_ioctl(object, "sclass size %d\n", size);
69 if (nvif_unpack(args->v0, 0, 0, true)) {
70 nv_ioctl(object, "sclass vers %d count %d\n",
71 args->v0.version, args->v0.count);
72 if (size == args->v0.count * sizeof(args->v0.oclass[0])) {
73 ret = nouveau_parent_lclass(object, args->v0.oclass,
74 args->v0.count);
75 if (ret >= 0) {
76 args->v0.count = ret;
77 ret = 0;
78 }
79 } else {
80 ret = -EINVAL;
81 }
82 }
83
84 return ret;
85}
86
87static int
88nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
89{
90 union {
91 struct nvif_ioctl_new_v0 v0;
92 } *args = data;
93 struct nouveau_client *client = nouveau_client(parent->object);
94 struct nouveau_object *engctx = NULL;
95 struct nouveau_object *object = NULL;
96 struct nouveau_object *engine;
97 struct nouveau_oclass *oclass;
98 struct nouveau_handle *handle;
99 u32 _handle, _oclass;
100 int ret;
101
102 nv_ioctl(client, "new size %d\n", size);
103 if (nvif_unpack(args->v0, 0, 0, true)) {
104 _handle = args->v0.handle;
105 _oclass = args->v0.oclass;
106 } else
107 return ret;
108
109 nv_ioctl(client, "new vers %d handle %08x class %08x "
110 "route %02x token %llx\n",
111 args->v0.version, _handle, _oclass,
112 args->v0.route, args->v0.token);
113
114 if (!nv_iclass(parent->object, NV_PARENT_CLASS)) {
115 nv_debug(parent->object, "cannot have children (ctor)\n");
116 ret = -ENODEV;
117 goto fail_class;
118 }
119
120 /* check that parent supports the requested subclass */
121 ret = nouveau_parent_sclass(parent->object, _oclass, &engine, &oclass);
122 if (ret) {
123 nv_debug(parent->object, "illegal class 0x%04x\n", _oclass);
124 goto fail_class;
125 }
126
127 /* make sure engine init has been completed *before* any objects
128 * it controls are created - the constructors may depend on
129 * state calculated at init (ie. default context construction)
130 */
131 if (engine) {
132 ret = nouveau_object_inc(engine);
133 if (ret)
134 goto fail_class;
135 }
136
137 /* if engine requires it, create a context object to insert
138 * between the parent and its children (eg. PGRAPH context)
139 */
140 if (engine && nv_engine(engine)->cclass) {
141 ret = nouveau_object_ctor(parent->object, engine,
142 nv_engine(engine)->cclass,
143 data, size, &engctx);
144 if (ret)
145 goto fail_engctx;
146 } else {
147 nouveau_object_ref(parent->object, &engctx);
148 }
149
150 /* finally, create new object and bind it to its handle */
151 ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
152 client->data = object;
153 if (ret)
154 goto fail_ctor;
155
156 ret = nouveau_object_inc(object);
157 if (ret)
158 goto fail_init;
159
160 ret = nouveau_handle_create(parent->object, parent->name,
161 _handle, object, &handle);
162 if (ret)
163 goto fail_handle;
164
165 ret = nouveau_handle_init(handle);
166 handle->route = args->v0.route;
167 handle->token = args->v0.token;
168 if (ret)
169 nouveau_handle_destroy(handle);
170
171fail_handle:
172 nouveau_object_dec(object, false);
173fail_init:
174 nouveau_object_ref(NULL, &object);
175fail_ctor:
176 nouveau_object_ref(NULL, &engctx);
177fail_engctx:
178 if (engine)
179 nouveau_object_dec(engine, false);
180fail_class:
181 return ret;
182}
183
184static int
185nvkm_ioctl_del(struct nouveau_handle *handle, void *data, u32 size)
186{
187 struct nouveau_object *object = handle->object;
188 union {
189 struct nvif_ioctl_del none;
190 } *args = data;
191 int ret;
192
193 nv_ioctl(object, "delete size %d\n", size);
194 if (nvif_unvers(args->none)) {
195 nv_ioctl(object, "delete\n");
196 nouveau_handle_fini(handle, false);
197 nouveau_handle_destroy(handle);
198 }
199
200 return ret;
201}
202
203static int
204nvkm_ioctl_mthd(struct nouveau_handle *handle, void *data, u32 size)
205{
206 struct nouveau_object *object = handle->object;
207 struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
208 union {
209 struct nvif_ioctl_mthd_v0 v0;
210 } *args = data;
211 int ret;
212
213 nv_ioctl(object, "mthd size %d\n", size);
214 if (nvif_unpack(args->v0, 0, 0, true)) {
215 nv_ioctl(object, "mthd vers %d mthd %02x\n",
216 args->v0.version, args->v0.method);
217 if (ret = -ENODEV, ofuncs->mthd)
218 ret = ofuncs->mthd(object, args->v0.method, data, size);
219 }
220
221 return ret;
222}
223
224
225static int
226nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size)
227{
228 struct nouveau_object *object = handle->object;
229 struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
230 union {
231 struct nvif_ioctl_rd_v0 v0;
232 } *args = data;
233 int ret;
234
235 nv_ioctl(object, "rd size %d\n", size);
236 if (nvif_unpack(args->v0, 0, 0, false)) {
237 nv_ioctl(object, "rd vers %d size %d addr %016llx\n",
238 args->v0.version, args->v0.size, args->v0.addr);
239 switch (args->v0.size) {
240 case 1:
241 if (ret = -ENODEV, ofuncs->rd08) {
242 args->v0.data = nv_ro08(object, args->v0.addr);
243 ret = 0;
244 }
245 break;
246 case 2:
247 if (ret = -ENODEV, ofuncs->rd16) {
248 args->v0.data = nv_ro16(object, args->v0.addr);
249 ret = 0;
250 }
251 break;
252 case 4:
253 if (ret = -ENODEV, ofuncs->rd32) {
254 args->v0.data = nv_ro32(object, args->v0.addr);
255 ret = 0;
256 }
257 break;
258 default:
259 ret = -EINVAL;
260 break;
261 }
262 }
263
264 return ret;
265}
266
267static int
268nvkm_ioctl_wr(struct nouveau_handle *handle, void *data, u32 size)
269{
270 struct nouveau_object *object = handle->object;
271 struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
272 union {
273 struct nvif_ioctl_wr_v0 v0;
274 } *args = data;
275 int ret;
276
277 nv_ioctl(object, "wr size %d\n", size);
278 if (nvif_unpack(args->v0, 0, 0, false)) {
279 nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n",
280 args->v0.version, args->v0.size, args->v0.addr,
281 args->v0.data);
282 switch (args->v0.size) {
283 case 1:
284 if (ret = -ENODEV, ofuncs->wr08) {
285 nv_wo08(object, args->v0.addr, args->v0.data);
286 ret = 0;
287 }
288 break;
289 case 2:
290 if (ret = -ENODEV, ofuncs->wr16) {
291 nv_wo16(object, args->v0.addr, args->v0.data);
292 ret = 0;
293 }
294 break;
295 case 4:
296 if (ret = -ENODEV, ofuncs->wr32) {
297 nv_wo32(object, args->v0.addr, args->v0.data);
298 ret = 0;
299 }
300 break;
301 default:
302 ret = -EINVAL;
303 break;
304 }
305 }
306
307 return ret;
308}
309
310static int
311nvkm_ioctl_map(struct nouveau_handle *handle, void *data, u32 size)
312{
313 struct nouveau_object *object = handle->object;
314 struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
315 union {
316 struct nvif_ioctl_map_v0 v0;
317 } *args = data;
318 int ret;
319
320 nv_ioctl(object, "map size %d\n", size);
321 if (nvif_unpack(args->v0, 0, 0, false)) {
322 nv_ioctl(object, "map vers %d\n", args->v0.version);
323 if (ret = -ENODEV, ofuncs->map) {
324 ret = ofuncs->map(object, &args->v0.handle,
325 &args->v0.length);
326 }
327 }
328
329 return ret;
330}
331
332static int
333nvkm_ioctl_unmap(struct nouveau_handle *handle, void *data, u32 size)
334{
335 struct nouveau_object *object = handle->object;
336 union {
337 struct nvif_ioctl_unmap none;
338 } *args = data;
339 int ret;
340
341 nv_ioctl(object, "unmap size %d\n", size);
342 if (nvif_unvers(args->none)) {
343 nv_ioctl(object, "unmap\n");
344 }
345
346 return ret;
347}
348
349static int
350nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size)
351{
352 struct nouveau_client *client = nouveau_client(handle->object);
353 struct nouveau_object *object = handle->object;
354 struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
355 union {
356 struct nvif_ioctl_ntfy_new_v0 v0;
357 } *args = data;
358 struct nvkm_event *event;
359 int ret;
360
361 nv_ioctl(object, "ntfy new size %d\n", size);
362 if (nvif_unpack(args->v0, 0, 0, true)) {
363 nv_ioctl(object, "ntfy new vers %d event %02x\n",
364 args->v0.version, args->v0.event);
365 if (ret = -ENODEV, ofuncs->ntfy)
366 ret = ofuncs->ntfy(object, args->v0.event, &event);
367 if (ret == 0) {
368 ret = nvkm_client_notify_new(client, event, data, size);
369 if (ret >= 0) {
370 args->v0.index = ret;
371 ret = 0;
372 }
373 }
374 }
375
376 return ret;
377}
378
379static int
380nvkm_ioctl_ntfy_del(struct nouveau_handle *handle, void *data, u32 size)
381{
382 struct nouveau_client *client = nouveau_client(handle->object);
383 struct nouveau_object *object = handle->object;
384 union {
385 struct nvif_ioctl_ntfy_del_v0 v0;
386 } *args = data;
387 int ret;
388
389 nv_ioctl(object, "ntfy del size %d\n", size);
390 if (nvif_unpack(args->v0, 0, 0, false)) {
391 nv_ioctl(object, "ntfy del vers %d index %d\n",
392 args->v0.version, args->v0.index);
393 ret = nvkm_client_notify_del(client, args->v0.index);
394 }
395
396 return ret;
397}
398
399static int
400nvkm_ioctl_ntfy_get(struct nouveau_handle *handle, void *data, u32 size)
401{
402 struct nouveau_client *client = nouveau_client(handle->object);
403 struct nouveau_object *object = handle->object;
404 union {
405 struct nvif_ioctl_ntfy_get_v0 v0;
406 } *args = data;
407 int ret;
408
409 nv_ioctl(object, "ntfy get size %d\n", size);
410 if (nvif_unpack(args->v0, 0, 0, false)) {
411 nv_ioctl(object, "ntfy get vers %d index %d\n",
412 args->v0.version, args->v0.index);
413 ret = nvkm_client_notify_get(client, args->v0.index);
414 }
415
416 return ret;
417}
418
419static int
420nvkm_ioctl_ntfy_put(struct nouveau_handle *handle, void *data, u32 size)
421{
422 struct nouveau_client *client = nouveau_client(handle->object);
423 struct nouveau_object *object = handle->object;
424 union {
425 struct nvif_ioctl_ntfy_put_v0 v0;
426 } *args = data;
427 int ret;
428
429 nv_ioctl(object, "ntfy put size %d\n", size);
430 if (nvif_unpack(args->v0, 0, 0, false)) {
431 nv_ioctl(object, "ntfy put vers %d index %d\n",
432 args->v0.version, args->v0.index);
433 ret = nvkm_client_notify_put(client, args->v0.index);
434 }
435
436 return ret;
437}
438
439static struct {
440 int version;
441 int (*func)(struct nouveau_handle *, void *, u32);
442}
443nvkm_ioctl_v0[] = {
444 { 0x00, nvkm_ioctl_nop },
445 { 0x00, nvkm_ioctl_sclass },
446 { 0x00, nvkm_ioctl_new },
447 { 0x00, nvkm_ioctl_del },
448 { 0x00, nvkm_ioctl_mthd },
449 { 0x00, nvkm_ioctl_rd },
450 { 0x00, nvkm_ioctl_wr },
451 { 0x00, nvkm_ioctl_map },
452 { 0x00, nvkm_ioctl_unmap },
453 { 0x00, nvkm_ioctl_ntfy_new },
454 { 0x00, nvkm_ioctl_ntfy_del },
455 { 0x00, nvkm_ioctl_ntfy_get },
456 { 0x00, nvkm_ioctl_ntfy_put },
457};
458
459static int
460nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr,
461 u32 *path, void *data, u32 size,
462 u8 owner, u8 *route, u64 *token)
463{
464 struct nouveau_handle *handle = parent;
465 struct nouveau_namedb *namedb;
466 struct nouveau_object *object;
467 int ret;
468
469 while ((object = parent->object), nr--) {
470 nv_ioctl(object, "path 0x%08x\n", path[nr]);
471 if (!nv_iclass(object, NV_PARENT_CLASS)) {
472 nv_debug(object, "cannot have children (path)\n");
473 return -EINVAL;
474 }
475
476 if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
477 !(handle = nouveau_namedb_get(namedb, path[nr]))) {
478 nv_debug(object, "handle 0x%08x not found\n", path[nr]);
479 return -ENOENT;
480 }
481 nouveau_namedb_put(handle);
482 parent = handle;
483 }
484
485 if (owner != NVIF_IOCTL_V0_OWNER_ANY &&
486 owner != handle->route) {
487 nv_ioctl(object, "object route != owner\n");
488 return -EACCES;
489 }
490 *route = handle->route;
491 *token = handle->token;
492
493 if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
494 if (nvkm_ioctl_v0[type].version == 0) {
495 ret = nvkm_ioctl_v0[type].func(handle, data, size);
496 }
497 }
498
499 return ret;
500}
501
502int
503nvkm_ioctl(struct nouveau_client *client, bool supervisor,
504 void *data, u32 size, void **hack)
505{
506 union {
507 struct nvif_ioctl_v0 v0;
508 } *args = data;
509 int ret;
510
511 client->super = supervisor;
512 nv_ioctl(client, "size %d\n", size);
513
514 if (nvif_unpack(args->v0, 0, 0, true)) {
515 nv_ioctl(client, "vers %d type %02x path %d owner %02x\n",
516 args->v0.version, args->v0.type, args->v0.path_nr,
517 args->v0.owner);
518 ret = nvkm_ioctl_path(client->root, args->v0.type,
519 args->v0.path_nr, args->v0.path,
520 data, size, args->v0.owner,
521 &args->v0.route, &args->v0.token);
522 }
523
524 nv_ioctl(client, "return %d\n", ret);
525 if (hack) {
526 *hack = client->data;
527 client->data = NULL;
528 }
529 client->super = false;
530 return ret;
531}
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c
new file mode 100644
index 000000000000..76adb81bdea2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/notify.c
@@ -0,0 +1,167 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <core/client.h>
26#include <core/event.h>
27#include <core/notify.h>
28
29#include <nvif/unpack.h>
30#include <nvif/event.h>
31
32static inline void
33nvkm_notify_put_locked(struct nvkm_notify *notify)
34{
35 if (notify->block++ == 0)
36 nvkm_event_put(notify->event, notify->types, notify->index);
37}
38
39void
40nvkm_notify_put(struct nvkm_notify *notify)
41{
42 struct nvkm_event *event = notify->event;
43 unsigned long flags;
44 if (likely(event) &&
45 test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
46 spin_lock_irqsave(&event->refs_lock, flags);
47 nvkm_notify_put_locked(notify);
48 spin_unlock_irqrestore(&event->refs_lock, flags);
49 if (test_bit(NVKM_NOTIFY_WORK, &notify->flags))
50 flush_work(&notify->work);
51 }
52}
53
54static inline void
55nvkm_notify_get_locked(struct nvkm_notify *notify)
56{
57 if (--notify->block == 0)
58 nvkm_event_get(notify->event, notify->types, notify->index);
59}
60
61void
62nvkm_notify_get(struct nvkm_notify *notify)
63{
64 struct nvkm_event *event = notify->event;
65 unsigned long flags;
66 if (likely(event) &&
67 !test_and_set_bit(NVKM_NOTIFY_USER, &notify->flags)) {
68 spin_lock_irqsave(&event->refs_lock, flags);
69 nvkm_notify_get_locked(notify);
70 spin_unlock_irqrestore(&event->refs_lock, flags);
71 }
72}
73
74static inline void
75nvkm_notify_func(struct nvkm_notify *notify)
76{
77 struct nvkm_event *event = notify->event;
78 int ret = notify->func(notify);
79 unsigned long flags;
80 if ((ret == NVKM_NOTIFY_KEEP) ||
81 !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
82 spin_lock_irqsave(&event->refs_lock, flags);
83 nvkm_notify_get_locked(notify);
84 spin_unlock_irqrestore(&event->refs_lock, flags);
85 }
86}
87
88static void
89nvkm_notify_work(struct work_struct *work)
90{
91 struct nvkm_notify *notify = container_of(work, typeof(*notify), work);
92 nvkm_notify_func(notify);
93}
94
95void
96nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
97{
98 struct nvkm_event *event = notify->event;
99 unsigned long flags;
100
101 BUG_ON(!spin_is_locked(&event->list_lock));
102 BUG_ON(size != notify->size);
103
104 spin_lock_irqsave(&event->refs_lock, flags);
105 if (notify->block) {
106 spin_unlock_irqrestore(&event->refs_lock, flags);
107 return;
108 }
109 nvkm_notify_put_locked(notify);
110 spin_unlock_irqrestore(&event->refs_lock, flags);
111
112 if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) {
113 memcpy((void *)notify->data, data, size);
114 schedule_work(&notify->work);
115 } else {
116 notify->data = data;
117 nvkm_notify_func(notify);
118 notify->data = NULL;
119 }
120}
121
122void
123nvkm_notify_fini(struct nvkm_notify *notify)
124{
125 unsigned long flags;
126 if (notify->event) {
127 nvkm_notify_put(notify);
128 spin_lock_irqsave(&notify->event->list_lock, flags);
129 list_del(&notify->head);
130 spin_unlock_irqrestore(&notify->event->list_lock, flags);
131 kfree((void *)notify->data);
132 notify->event = NULL;
133 }
134}
135
136int
137nvkm_notify_init(struct nvkm_event *event, int (*func)(struct nvkm_notify *),
138 bool work, void *data, u32 size, u32 reply,
139 struct nvkm_notify *notify)
140{
141 unsigned long flags;
142 int ret = -ENODEV;
143 if ((notify->event = event), event->refs) {
144 ret = event->func->ctor(data, size, notify);
145 if (ret == 0 && (ret = -EINVAL, notify->size == reply)) {
146 notify->flags = 0;
147 notify->block = 1;
148 notify->func = func;
149 notify->data = NULL;
150 if (ret = 0, work) {
151 INIT_WORK(&notify->work, nvkm_notify_work);
152 set_bit(NVKM_NOTIFY_WORK, &notify->flags);
153 notify->data = kmalloc(reply, GFP_KERNEL);
154 if (!notify->data)
155 ret = -ENOMEM;
156 }
157 }
158 if (ret == 0) {
159 spin_lock_irqsave(&event->list_lock, flags);
160 list_add_tail(&notify->head, &event->list);
161 spin_unlock_irqrestore(&event->list_lock, flags);
162 }
163 }
164 if (ret)
165 notify->event = NULL;
166 return ret;
167}
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
index 124538555904..b08630577c82 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -23,9 +23,6 @@
23 */ 23 */
24 24
25#include <core/object.h> 25#include <core/object.h>
26#include <core/parent.h>
27#include <core/namedb.h>
28#include <core/handle.h>
29#include <core/engine.h> 26#include <core/engine.h>
30 27
31#ifdef NOUVEAU_OBJECT_MAGIC 28#ifdef NOUVEAU_OBJECT_MAGIC
@@ -61,21 +58,15 @@ nouveau_object_create_(struct nouveau_object *parent,
61 return 0; 58 return 0;
62} 59}
63 60
64static int 61int
65_nouveau_object_ctor(struct nouveau_object *parent, 62_nouveau_object_ctor(struct nouveau_object *parent,
66 struct nouveau_object *engine, 63 struct nouveau_object *engine,
67 struct nouveau_oclass *oclass, void *data, u32 size, 64 struct nouveau_oclass *oclass, void *data, u32 size,
68 struct nouveau_object **pobject) 65 struct nouveau_object **pobject)
69{ 66{
70 struct nouveau_object *object; 67 if (size != 0)
71 int ret; 68 return -ENOSYS;
72 69 return nouveau_object_create(parent, engine, oclass, 0, pobject);
73 ret = nouveau_object_create(parent, engine, oclass, 0, &object);
74 *pobject = nv_object(object);
75 if (ret)
76 return ret;
77
78 return 0;
79} 70}
80 71
81void 72void
@@ -91,42 +82,24 @@ nouveau_object_destroy(struct nouveau_object *object)
91 kfree(object); 82 kfree(object);
92} 83}
93 84
94static void
95_nouveau_object_dtor(struct nouveau_object *object)
96{
97 nouveau_object_destroy(object);
98}
99
100int 85int
101nouveau_object_init(struct nouveau_object *object) 86nouveau_object_init(struct nouveau_object *object)
102{ 87{
103 return 0; 88 return 0;
104} 89}
105 90
106static int
107_nouveau_object_init(struct nouveau_object *object)
108{
109 return nouveau_object_init(object);
110}
111
112int 91int
113nouveau_object_fini(struct nouveau_object *object, bool suspend) 92nouveau_object_fini(struct nouveau_object *object, bool suspend)
114{ 93{
115 return 0; 94 return 0;
116} 95}
117 96
118static int
119_nouveau_object_fini(struct nouveau_object *object, bool suspend)
120{
121 return nouveau_object_fini(object, suspend);
122}
123
124struct nouveau_ofuncs 97struct nouveau_ofuncs
125nouveau_object_ofuncs = { 98nouveau_object_ofuncs = {
126 .ctor = _nouveau_object_ctor, 99 .ctor = _nouveau_object_ctor,
127 .dtor = _nouveau_object_dtor, 100 .dtor = nouveau_object_destroy,
128 .init = _nouveau_object_init, 101 .init = nouveau_object_init,
129 .fini = _nouveau_object_fini, 102 .fini = nouveau_object_fini,
130}; 103};
131 104
132int 105int
@@ -189,119 +162,6 @@ nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
189} 162}
190 163
191int 164int
192nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle,
193 u16 _oclass, void *data, u32 size,
194 struct nouveau_object **pobject)
195{
196 struct nouveau_object *parent = NULL;
197 struct nouveau_object *engctx = NULL;
198 struct nouveau_object *object = NULL;
199 struct nouveau_object *engine;
200 struct nouveau_oclass *oclass;
201 struct nouveau_handle *handle;
202 int ret;
203
204 /* lookup parent object and ensure it *is* a parent */
205 parent = nouveau_handle_ref(client, _parent);
206 if (!parent) {
207 nv_error(client, "parent 0x%08x not found\n", _parent);
208 return -ENOENT;
209 }
210
211 if (!nv_iclass(parent, NV_PARENT_CLASS)) {
212 nv_error(parent, "cannot have children\n");
213 ret = -EINVAL;
214 goto fail_class;
215 }
216
217 /* check that parent supports the requested subclass */
218 ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
219 if (ret) {
220 nv_debug(parent, "illegal class 0x%04x\n", _oclass);
221 goto fail_class;
222 }
223
224 /* make sure engine init has been completed *before* any objects
225 * it controls are created - the constructors may depend on
226 * state calculated at init (ie. default context construction)
227 */
228 if (engine) {
229 ret = nouveau_object_inc(engine);
230 if (ret)
231 goto fail_class;
232 }
233
234 /* if engine requires it, create a context object to insert
235 * between the parent and its children (eg. PGRAPH context)
236 */
237 if (engine && nv_engine(engine)->cclass) {
238 ret = nouveau_object_ctor(parent, engine,
239 nv_engine(engine)->cclass,
240 data, size, &engctx);
241 if (ret)
242 goto fail_engctx;
243 } else {
244 nouveau_object_ref(parent, &engctx);
245 }
246
247 /* finally, create new object and bind it to its handle */
248 ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
249 *pobject = object;
250 if (ret)
251 goto fail_ctor;
252
253 ret = nouveau_object_inc(object);
254 if (ret)
255 goto fail_init;
256
257 ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
258 if (ret)
259 goto fail_handle;
260
261 ret = nouveau_handle_init(handle);
262 if (ret)
263 nouveau_handle_destroy(handle);
264
265fail_handle:
266 nouveau_object_dec(object, false);
267fail_init:
268 nouveau_object_ref(NULL, &object);
269fail_ctor:
270 nouveau_object_ref(NULL, &engctx);
271fail_engctx:
272 if (engine)
273 nouveau_object_dec(engine, false);
274fail_class:
275 nouveau_object_ref(NULL, &parent);
276 return ret;
277}
278
279int
280nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
281{
282 struct nouveau_object *parent = NULL;
283 struct nouveau_object *namedb = NULL;
284 struct nouveau_handle *handle = NULL;
285
286 parent = nouveau_handle_ref(client, _parent);
287 if (!parent)
288 return -ENOENT;
289
290 namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
291 if (namedb) {
292 handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
293 if (handle) {
294 nouveau_namedb_put(handle);
295 nouveau_handle_fini(handle, false);
296 nouveau_handle_destroy(handle);
297 }
298 }
299
300 nouveau_object_ref(NULL, &parent);
301 return handle ? 0 : -EINVAL;
302}
303
304int
305nouveau_object_inc(struct nouveau_object *object) 165nouveau_object_inc(struct nouveau_object *object)
306{ 166{
307 int ref = atomic_add_return(1, &object->usecount); 167 int ref = atomic_add_return(1, &object->usecount);
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
index dee5d1235e9b..8701968a9743 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -75,6 +75,39 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
75} 75}
76 76
77int 77int
78nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
79{
80 struct nouveau_sclass *sclass;
81 struct nouveau_engine *engine;
82 struct nouveau_oclass *oclass;
83 int nr = -1, i;
84 u64 mask;
85
86 sclass = nv_parent(parent)->sclass;
87 while (sclass) {
88 if (++nr < size)
89 lclass[nr] = sclass->oclass->handle;
90 sclass = sclass->sclass;
91 }
92
93 mask = nv_parent(parent)->engine;
94 while (i = __ffs64(mask), mask) {
95 engine = nouveau_engine(parent, i);
96 if (engine && (oclass = engine->sclass)) {
97 while (oclass->ofuncs) {
98 if (++nr < size)
99 lclass[nr] = oclass->handle;
100 oclass++;
101 }
102 }
103
104 mask &= ~(1ULL << i);
105 }
106
107 return nr + 1;
108}
109
110int
78nouveau_parent_create_(struct nouveau_object *parent, 111nouveau_parent_create_(struct nouveau_object *parent,
79 struct nouveau_object *engine, 112 struct nouveau_object *engine,
80 struct nouveau_oclass *oclass, u32 pclass, 113 struct nouveau_oclass *oclass, u32 pclass,
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index f31527733e00..abb410ef09ea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -30,7 +30,6 @@
30#include <subdev/vm.h> 30#include <subdev/vm.h>
31 31
32#include <core/client.h> 32#include <core/client.h>
33#include <core/class.h>
34#include <core/enum.h> 33#include <core/enum.h>
35 34
36 35
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index ac3291f781f6..9261694d0d35 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -26,9 +26,7 @@
26#include <engine/fifo.h> 26#include <engine/fifo.h>
27#include <engine/copy.h> 27#include <engine/copy.h>
28 28
29#include <core/class.h>
30#include <core/enum.h> 29#include <core/enum.h>
31#include <core/class.h>
32#include <core/enum.h> 30#include <core/enum.h>
33 31
34#include "fuc/nvc0.fuc.h" 32#include "fuc/nvc0.fuc.h"
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 748a61eb3c6f..c7194b354605 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -24,7 +24,6 @@
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/enum.h> 26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h> 27#include <core/engctx.h>
29 28
30#include <engine/copy.h> 29#include <engine/copy.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 2551dafbec73..ea5c42f31791 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -25,7 +25,6 @@
25#include <core/client.h> 25#include <core/client.h>
26#include <core/os.h> 26#include <core/os.h>
27#include <core/enum.h> 27#include <core/enum.h>
28#include <core/class.h>
29#include <core/engctx.h> 28#include <core/engctx.h>
30#include <core/gpuobj.h> 29#include <core/gpuobj.h>
31 30
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index c7082377ec76..5571c09534cb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -25,7 +25,6 @@
25#include <core/client.h> 25#include <core/client.h>
26#include <core/os.h> 26#include <core/os.h>
27#include <core/enum.h> 27#include <core/enum.h>
28#include <core/class.h>
29#include <core/engctx.h> 28#include <core/engctx.h>
30 29
31#include <subdev/timer.h> 30#include <subdev/timer.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.c b/drivers/gpu/drm/nouveau/core/engine/device/acpi.c
new file mode 100644
index 000000000000..4dbf0ba89e5c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/acpi.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "acpi.h"
26
27#ifdef CONFIG_ACPI
28static int
29nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
30{
31 struct nouveau_device *device =
32 container_of(nb, typeof(*device), acpi.nb);
33 struct acpi_bus_event *info = data;
34
35 if (!strcmp(info->device_class, "ac_adapter"))
36 nvkm_event_send(&device->event, 1, 0, NULL, 0);
37
38 return NOTIFY_DONE;
39}
40#endif
41
42int
43nvkm_acpi_fini(struct nouveau_device *device, bool suspend)
44{
45#ifdef CONFIG_ACPI
46 unregister_acpi_notifier(&device->acpi.nb);
47#endif
48 return 0;
49}
50
51int
52nvkm_acpi_init(struct nouveau_device *device)
53{
54#ifdef CONFIG_ACPI
55 device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
56 register_acpi_notifier(&device->acpi.nb);
57#endif
58 return 0;
59}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.h b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h
new file mode 100644
index 000000000000..cc49f4f568cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h
@@ -0,0 +1,9 @@
1#ifndef __NVKM_DEVICE_ACPI_H__
2#define __NVKM_DEVICE_ACPI_H__
3
4#include <engine/device.h>
5
6int nvkm_acpi_init(struct nouveau_device *);
7int nvkm_acpi_fini(struct nouveau_device *, bool);
8
9#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 18c8c7245b73..8928f7981d4a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -26,10 +26,14 @@
26#include <core/device.h> 26#include <core/device.h>
27#include <core/client.h> 27#include <core/client.h>
28#include <core/option.h> 28#include <core/option.h>
29#include <nvif/unpack.h>
30#include <nvif/class.h>
29 31
30#include <core/class.h> 32#include <subdev/fb.h>
33#include <subdev/instmem.h>
31 34
32#include "priv.h" 35#include "priv.h"
36#include "acpi.h"
33 37
34static DEFINE_MUTEX(nv_devices_mutex); 38static DEFINE_MUTEX(nv_devices_mutex);
35static LIST_HEAD(nv_devices); 39static LIST_HEAD(nv_devices);
@@ -49,74 +53,258 @@ nouveau_device_find(u64 name)
49 return match; 53 return match;
50} 54}
51 55
56int
57nouveau_device_list(u64 *name, int size)
58{
59 struct nouveau_device *device;
60 int nr = 0;
61 mutex_lock(&nv_devices_mutex);
62 list_for_each_entry(device, &nv_devices, head) {
63 if (nr++ < size)
64 name[nr - 1] = device->handle;
65 }
66 mutex_unlock(&nv_devices_mutex);
67 return nr;
68}
69
52/****************************************************************************** 70/******************************************************************************
53 * nouveau_devobj (0x0080): class implementation 71 * nouveau_devobj (0x0080): class implementation
54 *****************************************************************************/ 72 *****************************************************************************/
73
55struct nouveau_devobj { 74struct nouveau_devobj {
56 struct nouveau_parent base; 75 struct nouveau_parent base;
57 struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; 76 struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
58}; 77};
59 78
79static int
80nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
81{
82 struct nouveau_device *device = nv_device(object);
83 struct nouveau_fb *pfb = nouveau_fb(device);
84 struct nouveau_instmem *imem = nouveau_instmem(device);
85 union {
86 struct nv_device_info_v0 v0;
87 } *args = data;
88 int ret;
89
90 nv_ioctl(object, "device info size %d\n", size);
91 if (nvif_unpack(args->v0, 0, 0, false)) {
92 nv_ioctl(object, "device info vers %d\n", args->v0.version);
93 } else
94 return ret;
95
96 switch (device->chipset) {
97 case 0x01a:
98 case 0x01f:
99 case 0x04c:
100 case 0x04e:
101 case 0x063:
102 case 0x067:
103 case 0x068:
104 case 0x0aa:
105 case 0x0ac:
106 case 0x0af:
107 args->v0.platform = NV_DEVICE_INFO_V0_IGP;
108 break;
109 default:
110 if (device->pdev) {
111 if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
112 args->v0.platform = NV_DEVICE_INFO_V0_AGP;
113 else
114 if (pci_is_pcie(device->pdev))
115 args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
116 else
117 args->v0.platform = NV_DEVICE_INFO_V0_PCI;
118 } else {
119 args->v0.platform = NV_DEVICE_INFO_V0_SOC;
120 }
121 break;
122 }
123
124 switch (device->card_type) {
125 case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
126 case NV_10:
127 case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
128 case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
129 case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
130 case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
131 case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
132 case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
133 case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
134 case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
135 default:
136 args->v0.family = 0;
137 break;
138 }
139
140 args->v0.chipset = device->chipset;
141 args->v0.revision = device->chipset >= 0x10 ? nv_rd32(device, 0) : 0x00;
142 if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
143 else args->v0.ram_size = args->v0.ram_user = 0;
144 if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
145 return 0;
146}
147
148static int
149nouveau_devobj_mthd(struct nouveau_object *object, u32 mthd,
150 void *data, u32 size)
151{
152 switch (mthd) {
153 case NV_DEVICE_V0_INFO:
154 return nouveau_devobj_info(object, data, size);
155 default:
156 break;
157 }
158 return -EINVAL;
159}
160
161static u8
162nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
163{
164 return nv_rd08(object->engine, addr);
165}
166
167static u16
168nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
169{
170 return nv_rd16(object->engine, addr);
171}
172
173static u32
174nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
175{
176 return nv_rd32(object->engine, addr);
177}
178
179static void
180nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
181{
182 nv_wr08(object->engine, addr, data);
183}
184
185static void
186nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
187{
188 nv_wr16(object->engine, addr, data);
189}
190
191static void
192nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
193{
194 nv_wr32(object->engine, addr, data);
195}
196
197static int
198nouveau_devobj_map(struct nouveau_object *object, u64 *addr, u32 *size)
199{
200 struct nouveau_device *device = nv_device(object);
201 *addr = nv_device_resource_start(device, 0);
202 *size = nv_device_resource_len(device, 0);
203 return 0;
204}
205
60static const u64 disable_map[] = { 206static const u64 disable_map[] = {
61 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS, 207 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_V0_DISABLE_VBIOS,
62 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE, 208 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_V0_DISABLE_CORE,
63 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE, 209 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_V0_DISABLE_CORE,
64 [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE, 210 [NVDEV_SUBDEV_I2C] = NV_DEVICE_V0_DISABLE_CORE,
65 [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE, 211 [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_V0_DISABLE_CORE,
66 [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE, 212 [NVDEV_SUBDEV_MXM] = NV_DEVICE_V0_DISABLE_CORE,
67 [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE, 213 [NVDEV_SUBDEV_MC] = NV_DEVICE_V0_DISABLE_CORE,
68 [NVDEV_SUBDEV_BUS] = NV_DEVICE_DISABLE_CORE, 214 [NVDEV_SUBDEV_BUS] = NV_DEVICE_V0_DISABLE_CORE,
69 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE, 215 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_V0_DISABLE_CORE,
70 [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE, 216 [NVDEV_SUBDEV_FB] = NV_DEVICE_V0_DISABLE_CORE,
71 [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE, 217 [NVDEV_SUBDEV_LTC] = NV_DEVICE_V0_DISABLE_CORE,
72 [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE, 218 [NVDEV_SUBDEV_IBUS] = NV_DEVICE_V0_DISABLE_CORE,
73 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE, 219 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_V0_DISABLE_CORE,
74 [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE, 220 [NVDEV_SUBDEV_VM] = NV_DEVICE_V0_DISABLE_CORE,
75 [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE, 221 [NVDEV_SUBDEV_BAR] = NV_DEVICE_V0_DISABLE_CORE,
76 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE, 222 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE,
77 [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE, 223 [NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE,
78 [NVDEV_SUBDEV_PWR] = NV_DEVICE_DISABLE_CORE, 224 [NVDEV_SUBDEV_PWR] = NV_DEVICE_V0_DISABLE_CORE,
79 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE, 225 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE,
80 [NVDEV_ENGINE_PERFMON] = NV_DEVICE_DISABLE_CORE, 226 [NVDEV_ENGINE_PERFMON] = NV_DEVICE_V0_DISABLE_CORE,
81 [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO, 227 [NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO,
82 [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO, 228 [NVDEV_ENGINE_SW] = NV_DEVICE_V0_DISABLE_FIFO,
83 [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH, 229 [NVDEV_ENGINE_GR] = NV_DEVICE_V0_DISABLE_GRAPH,
84 [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG, 230 [NVDEV_ENGINE_MPEG] = NV_DEVICE_V0_DISABLE_MPEG,
85 [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME, 231 [NVDEV_ENGINE_ME] = NV_DEVICE_V0_DISABLE_ME,
86 [NVDEV_ENGINE_VP] = NV_DEVICE_DISABLE_VP, 232 [NVDEV_ENGINE_VP] = NV_DEVICE_V0_DISABLE_VP,
87 [NVDEV_ENGINE_CRYPT] = NV_DEVICE_DISABLE_CRYPT, 233 [NVDEV_ENGINE_CRYPT] = NV_DEVICE_V0_DISABLE_CRYPT,
88 [NVDEV_ENGINE_BSP] = NV_DEVICE_DISABLE_BSP, 234 [NVDEV_ENGINE_BSP] = NV_DEVICE_V0_DISABLE_BSP,
89 [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP, 235 [NVDEV_ENGINE_PPP] = NV_DEVICE_V0_DISABLE_PPP,
90 [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0, 236 [NVDEV_ENGINE_COPY0] = NV_DEVICE_V0_DISABLE_COPY0,
91 [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1, 237 [NVDEV_ENGINE_COPY1] = NV_DEVICE_V0_DISABLE_COPY1,
92 [NVDEV_ENGINE_VIC] = NV_DEVICE_DISABLE_VIC, 238 [NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC,
93 [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC, 239 [NVDEV_ENGINE_VENC] = NV_DEVICE_V0_DISABLE_VENC,
94 [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP, 240 [NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP,
95 [NVDEV_SUBDEV_NR] = 0, 241 [NVDEV_SUBDEV_NR] = 0,
96}; 242};
97 243
244static void
245nouveau_devobj_dtor(struct nouveau_object *object)
246{
247 struct nouveau_devobj *devobj = (void *)object;
248 int i;
249
250 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
251 nouveau_object_ref(NULL, &devobj->subdev[i]);
252
253 nouveau_parent_destroy(&devobj->base);
254}
255
256static struct nouveau_oclass
257nouveau_devobj_oclass_super = {
258 .handle = NV_DEVICE,
259 .ofuncs = &(struct nouveau_ofuncs) {
260 .dtor = nouveau_devobj_dtor,
261 .init = _nouveau_parent_init,
262 .fini = _nouveau_parent_fini,
263 .mthd = nouveau_devobj_mthd,
264 .map = nouveau_devobj_map,
265 .rd08 = nouveau_devobj_rd08,
266 .rd16 = nouveau_devobj_rd16,
267 .rd32 = nouveau_devobj_rd32,
268 .wr08 = nouveau_devobj_wr08,
269 .wr16 = nouveau_devobj_wr16,
270 .wr32 = nouveau_devobj_wr32,
271 }
272};
273
98static int 274static int
99nouveau_devobj_ctor(struct nouveau_object *parent, 275nouveau_devobj_ctor(struct nouveau_object *parent,
100 struct nouveau_object *engine, 276 struct nouveau_object *engine,
101 struct nouveau_oclass *oclass, void *data, u32 size, 277 struct nouveau_oclass *oclass, void *data, u32 size,
102 struct nouveau_object **pobject) 278 struct nouveau_object **pobject)
103{ 279{
280 union {
281 struct nv_device_v0 v0;
282 } *args = data;
104 struct nouveau_client *client = nv_client(parent); 283 struct nouveau_client *client = nv_client(parent);
105 struct nouveau_device *device; 284 struct nouveau_device *device;
106 struct nouveau_devobj *devobj; 285 struct nouveau_devobj *devobj;
107 struct nv_device_class *args = data;
108 u32 boot0, strap; 286 u32 boot0, strap;
109 u64 disable, mmio_base, mmio_size; 287 u64 disable, mmio_base, mmio_size;
110 void __iomem *map; 288 void __iomem *map;
111 int ret, i, c; 289 int ret, i, c;
112 290
113 if (size < sizeof(struct nv_device_class)) 291 nv_ioctl(parent, "create device size %d\n", size);
114 return -EINVAL; 292 if (nvif_unpack(args->v0, 0, 0, false)) {
293 nv_ioctl(parent, "create device v%d device %016llx "
294 "disable %016llx debug0 %016llx\n",
295 args->v0.version, args->v0.device,
296 args->v0.disable, args->v0.debug0);
297 } else
298 return ret;
299
300 /* give priviledged clients register access */
301 if (client->super)
302 oclass = &nouveau_devobj_oclass_super;
115 303
116 /* find the device subdev that matches what the client requested */ 304 /* find the device subdev that matches what the client requested */
117 device = nv_device(client->device); 305 device = nv_device(client->device);
118 if (args->device != ~0) { 306 if (args->v0.device != ~0) {
119 device = nouveau_device_find(args->device); 307 device = nouveau_device_find(args->v0.device);
120 if (!device) 308 if (!device)
121 return -ENODEV; 309 return -ENODEV;
122 } 310 }
@@ -135,14 +323,14 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
135 mmio_size = nv_device_resource_len(device, 0); 323 mmio_size = nv_device_resource_len(device, 0);
136 324
137 /* translate api disable mask into internal mapping */ 325 /* translate api disable mask into internal mapping */
138 disable = args->debug0; 326 disable = args->v0.debug0;
139 for (i = 0; i < NVDEV_SUBDEV_NR; i++) { 327 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
140 if (args->disable & disable_map[i]) 328 if (args->v0.disable & disable_map[i])
141 disable |= (1ULL << i); 329 disable |= (1ULL << i);
142 } 330 }
143 331
144 /* identify the chipset, and determine classes of subdev/engines */ 332 /* identify the chipset, and determine classes of subdev/engines */
145 if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) && 333 if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
146 !device->card_type) { 334 !device->card_type) {
147 map = ioremap(mmio_base, 0x102000); 335 map = ioremap(mmio_base, 0x102000);
148 if (map == NULL) 336 if (map == NULL)
@@ -180,8 +368,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
180 case 0x080: 368 case 0x080:
181 case 0x090: 369 case 0x090:
182 case 0x0a0: device->card_type = NV_50; break; 370 case 0x0a0: device->card_type = NV_50; break;
183 case 0x0c0: device->card_type = NV_C0; break; 371 case 0x0c0:
184 case 0x0d0: device->card_type = NV_D0; break; 372 case 0x0d0: device->card_type = NV_C0; break;
185 case 0x0e0: 373 case 0x0e0:
186 case 0x0f0: 374 case 0x0f0:
187 case 0x100: device->card_type = NV_E0; break; 375 case 0x100: device->card_type = NV_E0; break;
@@ -206,8 +394,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
206 case NV_30: ret = nv30_identify(device); break; 394 case NV_30: ret = nv30_identify(device); break;
207 case NV_40: ret = nv40_identify(device); break; 395 case NV_40: ret = nv40_identify(device); break;
208 case NV_50: ret = nv50_identify(device); break; 396 case NV_50: ret = nv50_identify(device); break;
209 case NV_C0: 397 case NV_C0: ret = nvc0_identify(device); break;
210 case NV_D0: ret = nvc0_identify(device); break;
211 case NV_E0: ret = nve0_identify(device); break; 398 case NV_E0: ret = nve0_identify(device); break;
212 case GM100: ret = gm100_identify(device); break; 399 case GM100: ret = gm100_identify(device); break;
213 default: 400 default:
@@ -242,7 +429,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
242 nv_debug(device, "crystal freq: %dKHz\n", device->crystal); 429 nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
243 } 430 }
244 431
245 if (!(args->disable & NV_DEVICE_DISABLE_MMIO) && 432 if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
246 !nv_subdev(device)->mmio) { 433 !nv_subdev(device)->mmio) {
247 nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size); 434 nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
248 if (!nv_subdev(device)->mmio) { 435 if (!nv_subdev(device)->mmio) {
@@ -298,71 +485,19 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
298 return 0; 485 return 0;
299} 486}
300 487
301static void
302nouveau_devobj_dtor(struct nouveau_object *object)
303{
304 struct nouveau_devobj *devobj = (void *)object;
305 int i;
306
307 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
308 nouveau_object_ref(NULL, &devobj->subdev[i]);
309
310 nouveau_parent_destroy(&devobj->base);
311}
312
313static u8
314nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
315{
316 return nv_rd08(object->engine, addr);
317}
318
319static u16
320nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
321{
322 return nv_rd16(object->engine, addr);
323}
324
325static u32
326nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
327{
328 return nv_rd32(object->engine, addr);
329}
330
331static void
332nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
333{
334 nv_wr08(object->engine, addr, data);
335}
336
337static void
338nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
339{
340 nv_wr16(object->engine, addr, data);
341}
342
343static void
344nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
345{
346 nv_wr32(object->engine, addr, data);
347}
348
349static struct nouveau_ofuncs 488static struct nouveau_ofuncs
350nouveau_devobj_ofuncs = { 489nouveau_devobj_ofuncs = {
351 .ctor = nouveau_devobj_ctor, 490 .ctor = nouveau_devobj_ctor,
352 .dtor = nouveau_devobj_dtor, 491 .dtor = nouveau_devobj_dtor,
353 .init = _nouveau_parent_init, 492 .init = _nouveau_parent_init,
354 .fini = _nouveau_parent_fini, 493 .fini = _nouveau_parent_fini,
355 .rd08 = nouveau_devobj_rd08, 494 .mthd = nouveau_devobj_mthd,
356 .rd16 = nouveau_devobj_rd16,
357 .rd32 = nouveau_devobj_rd32,
358 .wr08 = nouveau_devobj_wr08,
359 .wr16 = nouveau_devobj_wr16,
360 .wr32 = nouveau_devobj_wr32,
361}; 495};
362 496
363/****************************************************************************** 497/******************************************************************************
364 * nouveau_device: engine functions 498 * nouveau_device: engine functions
365 *****************************************************************************/ 499 *****************************************************************************/
500
366static struct nouveau_oclass 501static struct nouveau_oclass
367nouveau_device_sclass[] = { 502nouveau_device_sclass[] = {
368 { 0x0080, &nouveau_devobj_ofuncs }, 503 { 0x0080, &nouveau_devobj_ofuncs },
@@ -370,6 +505,23 @@ nouveau_device_sclass[] = {
370}; 505};
371 506
372static int 507static int
508nouveau_device_event_ctor(void *data, u32 size, struct nvkm_notify *notify)
509{
510 if (!WARN_ON(size != 0)) {
511 notify->size = 0;
512 notify->types = 1;
513 notify->index = 0;
514 return 0;
515 }
516 return -EINVAL;
517}
518
519static const struct nvkm_event_func
520nouveau_device_event_func = {
521 .ctor = nouveau_device_event_ctor,
522};
523
524static int
373nouveau_device_fini(struct nouveau_object *object, bool suspend) 525nouveau_device_fini(struct nouveau_object *object, bool suspend)
374{ 526{
375 struct nouveau_device *device = (void *)object; 527 struct nouveau_device *device = (void *)object;
@@ -386,7 +538,7 @@ nouveau_device_fini(struct nouveau_object *object, bool suspend)
386 } 538 }
387 } 539 }
388 540
389 ret = 0; 541 ret = nvkm_acpi_fini(device, suspend);
390fail: 542fail:
391 for (; ret && i < NVDEV_SUBDEV_NR; i++) { 543 for (; ret && i < NVDEV_SUBDEV_NR; i++) {
392 if ((subdev = device->subdev[i])) { 544 if ((subdev = device->subdev[i])) {
@@ -407,7 +559,11 @@ nouveau_device_init(struct nouveau_object *object)
407{ 559{
408 struct nouveau_device *device = (void *)object; 560 struct nouveau_device *device = (void *)object;
409 struct nouveau_object *subdev; 561 struct nouveau_object *subdev;
410 int ret, i; 562 int ret, i = 0;
563
564 ret = nvkm_acpi_init(device);
565 if (ret)
566 goto fail;
411 567
412 for (i = 0; i < NVDEV_SUBDEV_NR; i++) { 568 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
413 if ((subdev = device->subdev[i])) { 569 if ((subdev = device->subdev[i])) {
@@ -430,6 +586,8 @@ fail:
430 } 586 }
431 } 587 }
432 588
589 if (ret)
590 nvkm_acpi_fini(device, false);
433 return ret; 591 return ret;
434} 592}
435 593
@@ -438,6 +596,8 @@ nouveau_device_dtor(struct nouveau_object *object)
438{ 596{
439 struct nouveau_device *device = (void *)object; 597 struct nouveau_device *device = (void *)object;
440 598
599 nvkm_event_fini(&device->event);
600
441 mutex_lock(&nv_devices_mutex); 601 mutex_lock(&nv_devices_mutex);
442 list_del(&device->head); 602 list_del(&device->head);
443 mutex_unlock(&nv_devices_mutex); 603 mutex_unlock(&nv_devices_mutex);
@@ -478,31 +638,6 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
478 } 638 }
479} 639}
480 640
481dma_addr_t
482nv_device_map_page(struct nouveau_device *device, struct page *page)
483{
484 dma_addr_t ret;
485
486 if (nv_device_is_pci(device)) {
487 ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
488 PCI_DMA_BIDIRECTIONAL);
489 if (pci_dma_mapping_error(device->pdev, ret))
490 ret = 0;
491 } else {
492 ret = page_to_phys(page);
493 }
494
495 return ret;
496}
497
498void
499nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
500{
501 if (nv_device_is_pci(device))
502 pci_unmap_page(device->pdev, addr, PAGE_SIZE,
503 PCI_DMA_BIDIRECTIONAL);
504}
505
506int 641int
507nv_device_get_irq(struct nouveau_device *device, bool stall) 642nv_device_get_irq(struct nouveau_device *device, bool stall)
508{ 643{
@@ -560,6 +695,9 @@ nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
560 nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE"); 695 nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
561 nv_engine(device)->sclass = nouveau_device_sclass; 696 nv_engine(device)->sclass = nouveau_device_sclass;
562 list_add(&device->head, &nv_devices); 697 list_add(&device->head, &nv_devices);
698
699 ret = nvkm_event_init(&nouveau_device_event_func, 1, 1,
700 &device->event);
563done: 701done:
564 mutex_unlock(&nv_devices_mutex); 702 mutex_unlock(&nv_devices_mutex);
565 return ret; 703 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
index 4b69bf56ed01..e34101a3490e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
@@ -22,55 +22,82 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include <core/client.h>
25#include <core/object.h> 26#include <core/object.h>
26#include <core/class.h> 27#include <nvif/unpack.h>
28#include <nvif/class.h>
29#include <nvif/ioctl.h>
27 30
28#include <subdev/clock.h> 31#include <subdev/clock.h>
29 32
30#include "priv.h" 33#include "priv.h"
31 34
32static int 35static int
33nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd, 36nouveau_control_mthd_pstate_info(struct nouveau_object *object,
34 void *data, u32 size) 37 void *data, u32 size)
35{ 38{
39 union {
40 struct nvif_control_pstate_info_v0 v0;
41 } *args = data;
36 struct nouveau_clock *clk = nouveau_clock(object); 42 struct nouveau_clock *clk = nouveau_clock(object);
37 struct nv_control_pstate_info *args = data; 43 int ret;
38 44
39 if (size < sizeof(*args)) 45 nv_ioctl(object, "control pstate info size %d\n", size);
40 return -EINVAL; 46 if (nvif_unpack(args->v0, 0, 0, false)) {
47 nv_ioctl(object, "control pstate info vers %d\n",
48 args->v0.version);
49 } else
50 return ret;
41 51
42 if (clk) { 52 if (clk) {
43 args->count = clk->state_nr; 53 args->v0.count = clk->state_nr;
44 args->ustate = clk->ustate; 54 args->v0.ustate_ac = clk->ustate_ac;
45 args->pstate = clk->pstate; 55 args->v0.ustate_dc = clk->ustate_dc;
56 args->v0.pwrsrc = clk->pwrsrc;
57 args->v0.pstate = clk->pstate;
46 } else { 58 } else {
47 args->count = 0; 59 args->v0.count = 0;
48 args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE; 60 args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
49 args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN; 61 args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
62 args->v0.pwrsrc = -ENOSYS;
63 args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
50 } 64 }
51 65
52 return 0; 66 return 0;
53} 67}
54 68
55static int 69static int
56nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd, 70nouveau_control_mthd_pstate_attr(struct nouveau_object *object,
57 void *data, u32 size) 71 void *data, u32 size)
58{ 72{
73 union {
74 struct nvif_control_pstate_attr_v0 v0;
75 } *args = data;
59 struct nouveau_clock *clk = nouveau_clock(object); 76 struct nouveau_clock *clk = nouveau_clock(object);
60 struct nv_control_pstate_attr *args = data;
61 struct nouveau_clocks *domain; 77 struct nouveau_clocks *domain;
62 struct nouveau_pstate *pstate; 78 struct nouveau_pstate *pstate;
63 struct nouveau_cstate *cstate; 79 struct nouveau_cstate *cstate;
64 int i = 0, j = -1; 80 int i = 0, j = -1;
65 u32 lo, hi; 81 u32 lo, hi;
66 82 int ret;
67 if ((size < sizeof(*args)) || !clk || 83
68 (args->state >= 0 && args->state >= clk->state_nr)) 84 nv_ioctl(object, "control pstate attr size %d\n", size);
69 return -EINVAL; 85 if (nvif_unpack(args->v0, 0, 0, false)) {
86 nv_ioctl(object, "control pstate attr vers %d state %d "
87 "index %d\n",
88 args->v0.version, args->v0.state, args->v0.index);
89 if (!clk)
90 return -ENODEV;
91 if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT)
92 return -EINVAL;
93 if (args->v0.state >= clk->state_nr)
94 return -EINVAL;
95 } else
96 return ret;
70 domain = clk->domains; 97 domain = clk->domains;
71 98
72 while (domain->name != nv_clk_src_max) { 99 while (domain->name != nv_clk_src_max) {
73 if (domain->mname && ++j == args->index) 100 if (domain->mname && ++j == args->v0.index)
74 break; 101 break;
75 domain++; 102 domain++;
76 } 103 }
@@ -78,9 +105,9 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
78 if (domain->name == nv_clk_src_max) 105 if (domain->name == nv_clk_src_max)
79 return -EINVAL; 106 return -EINVAL;
80 107
81 if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) { 108 if (args->v0.state != NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT) {
82 list_for_each_entry(pstate, &clk->states, head) { 109 list_for_each_entry(pstate, &clk->states, head) {
83 if (i++ == args->state) 110 if (i++ == args->v0.state)
84 break; 111 break;
85 } 112 }
86 113
@@ -91,21 +118,21 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
91 hi = max(hi, cstate->domain[domain->name]); 118 hi = max(hi, cstate->domain[domain->name]);
92 } 119 }
93 120
94 args->state = pstate->pstate; 121 args->v0.state = pstate->pstate;
95 } else { 122 } else {
96 lo = max(clk->read(clk, domain->name), 0); 123 lo = max(clk->read(clk, domain->name), 0);
97 hi = lo; 124 hi = lo;
98 } 125 }
99 126
100 snprintf(args->name, sizeof(args->name), "%s", domain->mname); 127 snprintf(args->v0.name, sizeof(args->v0.name), "%s", domain->mname);
101 snprintf(args->unit, sizeof(args->unit), "MHz"); 128 snprintf(args->v0.unit, sizeof(args->v0.unit), "MHz");
102 args->min = lo / domain->mdiv; 129 args->v0.min = lo / domain->mdiv;
103 args->max = hi / domain->mdiv; 130 args->v0.max = hi / domain->mdiv;
104 131
105 args->index = 0; 132 args->v0.index = 0;
106 while ((++domain)->name != nv_clk_src_max) { 133 while ((++domain)->name != nv_clk_src_max) {
107 if (domain->mname) { 134 if (domain->mname) {
108 args->index = ++j; 135 args->v0.index = ++j;
109 break; 136 break;
110 } 137 }
111 } 138 }
@@ -114,31 +141,65 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
114} 141}
115 142
116static int 143static int
117nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd, 144nouveau_control_mthd_pstate_user(struct nouveau_object *object,
118 void *data, u32 size) 145 void *data, u32 size)
119{ 146{
147 union {
148 struct nvif_control_pstate_user_v0 v0;
149 } *args = data;
120 struct nouveau_clock *clk = nouveau_clock(object); 150 struct nouveau_clock *clk = nouveau_clock(object);
121 struct nv_control_pstate_user *args = data; 151 int ret;
152
153 nv_ioctl(object, "control pstate user size %d\n", size);
154 if (nvif_unpack(args->v0, 0, 0, false)) {
155 nv_ioctl(object, "control pstate user vers %d ustate %d "
156 "pwrsrc %d\n", args->v0.version,
157 args->v0.ustate, args->v0.pwrsrc);
158 if (!clk)
159 return -ENODEV;
160 } else
161 return ret;
162
163 if (args->v0.pwrsrc >= 0) {
164 ret |= nouveau_clock_ustate(clk, args->v0.ustate, args->v0.pwrsrc);
165 } else {
166 ret |= nouveau_clock_ustate(clk, args->v0.ustate, 0);
167 ret |= nouveau_clock_ustate(clk, args->v0.ustate, 1);
168 }
122 169
123 if (size < sizeof(*args) || !clk) 170 return ret;
124 return -EINVAL; 171}
125 172
126 return nouveau_clock_ustate(clk, args->state); 173static int
174nouveau_control_mthd(struct nouveau_object *object, u32 mthd,
175 void *data, u32 size)
176{
177 switch (mthd) {
178 case NVIF_CONTROL_PSTATE_INFO:
179 return nouveau_control_mthd_pstate_info(object, data, size);
180 case NVIF_CONTROL_PSTATE_ATTR:
181 return nouveau_control_mthd_pstate_attr(object, data, size);
182 case NVIF_CONTROL_PSTATE_USER:
183 return nouveau_control_mthd_pstate_user(object, data, size);
184 default:
185 break;
186 }
187 return -EINVAL;
127} 188}
128 189
190static struct nouveau_ofuncs
191nouveau_control_ofuncs = {
192 .ctor = _nouveau_object_ctor,
193 .dtor = nouveau_object_destroy,
194 .init = nouveau_object_init,
195 .fini = nouveau_object_fini,
196 .mthd = nouveau_control_mthd,
197};
198
129struct nouveau_oclass 199struct nouveau_oclass
130nouveau_control_oclass[] = { 200nouveau_control_oclass[] = {
131 { .handle = NV_CONTROL_CLASS, 201 { .handle = NVIF_IOCTL_NEW_V0_CONTROL,
132 .ofuncs = &nouveau_object_ofuncs, 202 .ofuncs = &nouveau_control_ofuncs
133 .omthds = (struct nouveau_omthds[]) {
134 { NV_CONTROL_PSTATE_INFO,
135 NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info },
136 { NV_CONTROL_PSTATE_ATTR,
137 NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr },
138 { NV_CONTROL_PSTATE_USER,
139 NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user },
140 {},
141 },
142 }, 203 },
143 {} 204 {}
144}; 205};
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
index a520029e25d9..377ec0b8851e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -33,7 +33,7 @@
33#include <subdev/mc.h> 33#include <subdev/mc.h>
34#include <subdev/timer.h> 34#include <subdev/timer.h>
35#include <subdev/fb.h> 35#include <subdev/fb.h>
36#include <subdev/ltcg.h> 36#include <subdev/ltc.h>
37#include <subdev/ibus.h> 37#include <subdev/ibus.h>
38#include <subdev/instmem.h> 38#include <subdev/instmem.h>
39#include <subdev/vm.h> 39#include <subdev/vm.h>
@@ -68,20 +68,20 @@ gm100_identify(struct nouveau_device *device)
68#endif 68#endif
69 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 69 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
70 device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass; 70 device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass;
71 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; 71 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
72 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 72 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
73 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; 73 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
74 device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass; 74 device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
75 device->oclass[NVDEV_SUBDEV_LTCG ] = gm107_ltcg_oclass; 75 device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
76 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 76 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
77 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 77 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
78 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 78 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
79 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 79 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
80#if 0 80#if 0
81 device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass; 81 device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
82 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 82 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
83#endif 83#endif
84 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 84 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
85 device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; 85 device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
86 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 86 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
87 device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass; 87 device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index 40b29d0214cb..573b55f5c2f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -56,7 +56,7 @@ nv04_identify(struct nouveau_device *device)
56 device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; 56 device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
57 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 57 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
58 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 58 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
59 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 59 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
60 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; 60 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
61 device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass; 61 device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
62 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; 62 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
@@ -74,7 +74,7 @@ nv04_identify(struct nouveau_device *device)
74 device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; 74 device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
75 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 75 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
76 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 76 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
77 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 77 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
78 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; 78 device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
79 device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass; 79 device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
80 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; 80 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 5f7c25ff523d..183a85a6204e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -58,7 +58,7 @@ nv10_identify(struct nouveau_device *device)
58 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; 58 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
59 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 59 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
60 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 60 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
61 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 61 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
62 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 62 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
63 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass; 63 device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
64 break; 64 break;
@@ -75,7 +75,7 @@ nv10_identify(struct nouveau_device *device)
75 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; 75 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 76 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
79 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; 79 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
80 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 80 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
81 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 81 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -94,7 +94,7 @@ nv10_identify(struct nouveau_device *device)
94 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; 94 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
95 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 95 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
96 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 96 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
97 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 97 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
98 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; 98 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
99 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 99 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
100 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 100 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -113,7 +113,7 @@ nv10_identify(struct nouveau_device *device)
113 device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; 113 device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
114 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 114 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
115 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 115 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
116 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 116 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
117 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; 117 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
118 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 118 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
119 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 119 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -132,7 +132,7 @@ nv10_identify(struct nouveau_device *device)
132 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; 132 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
133 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 133 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
134 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 134 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
135 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 135 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
136 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; 136 device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
137 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 137 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
138 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 138 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -151,7 +151,7 @@ nv10_identify(struct nouveau_device *device)
151 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; 151 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
152 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 152 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
153 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 153 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
154 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 154 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
155 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 155 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
156 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 156 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
157 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 157 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -170,7 +170,7 @@ nv10_identify(struct nouveau_device *device)
170 device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; 170 device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
171 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 171 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
172 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 172 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
173 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 173 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
174 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 174 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
175 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 175 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
176 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 176 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -189,7 +189,7 @@ nv10_identify(struct nouveau_device *device)
189 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; 189 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
190 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 190 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
191 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 191 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
192 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 192 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
193 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 193 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
194 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 194 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
195 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; 195 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 75fed11bba0a..aa564c68a920 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -59,7 +59,7 @@ nv20_identify(struct nouveau_device *device)
59 device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass; 59 device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 60 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass; 65 device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
@@ -78,7 +78,7 @@ nv20_identify(struct nouveau_device *device)
78 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; 78 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
79 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 79 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
80 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 80 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
83 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 83 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
84 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; 84 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
@@ -97,7 +97,7 @@ nv20_identify(struct nouveau_device *device)
97 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; 97 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 98 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
100 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 100 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
101 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 101 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
102 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 102 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
103 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; 103 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
@@ -116,7 +116,7 @@ nv20_identify(struct nouveau_device *device)
116 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; 116 device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
117 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 117 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
118 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 118 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
119 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 119 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
120 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 120 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
121 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 121 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
122 device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass; 122 device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 36919d7db7cc..11bd31da82ab 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -59,7 +59,7 @@ nv30_identify(struct nouveau_device *device)
59 device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; 59 device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 60 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 63 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 64 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; 65 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
@@ -78,7 +78,7 @@ nv30_identify(struct nouveau_device *device)
78 device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass; 78 device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
79 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 79 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
80 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 80 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 81 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 82 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
83 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 83 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
84 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; 84 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
@@ -97,7 +97,7 @@ nv30_identify(struct nouveau_device *device)
97 device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; 97 device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 98 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 99 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
100 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 100 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
101 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 101 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
102 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 102 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
103 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; 103 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
@@ -117,7 +117,7 @@ nv30_identify(struct nouveau_device *device)
117 device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass; 117 device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
118 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 118 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
119 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 119 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
120 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 120 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
121 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 121 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
122 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 122 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
123 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; 123 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
@@ -137,7 +137,7 @@ nv30_identify(struct nouveau_device *device)
137 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; 137 device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
138 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; 138 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
139 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 139 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
140 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 140 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
141 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; 141 device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
142 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 142 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
143 device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass; 143 device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1130a62be2c7..e96c223cb797 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -65,7 +65,7 @@ nv40_identify(struct nouveau_device *device)
65 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 65 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
66 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 66 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
67 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 67 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
68 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 68 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
69 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 69 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
70 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 70 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
71 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 71 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -88,7 +88,7 @@ nv40_identify(struct nouveau_device *device)
88 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 88 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
89 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 89 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
90 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 90 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
91 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 91 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
92 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 92 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
93 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 93 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
94 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 94 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -111,7 +111,7 @@ nv40_identify(struct nouveau_device *device)
111 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 111 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
112 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 112 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
113 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 113 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
115 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 115 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
116 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 116 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
117 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 117 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -134,7 +134,7 @@ nv40_identify(struct nouveau_device *device)
134 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 134 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
135 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 135 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
136 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 136 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
137 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 137 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
138 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 138 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
139 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 139 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
140 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 140 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -157,7 +157,7 @@ nv40_identify(struct nouveau_device *device)
157 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 157 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
158 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 158 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
159 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 159 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
160 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 160 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
161 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 161 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
162 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 162 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
163 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 163 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -180,7 +180,7 @@ nv40_identify(struct nouveau_device *device)
180 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 180 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
181 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 181 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
182 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 182 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
183 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 183 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
184 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 184 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
185 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 185 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
186 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 186 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -203,7 +203,7 @@ nv40_identify(struct nouveau_device *device)
203 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 203 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
204 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 204 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
205 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 205 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
206 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 206 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
207 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 207 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
208 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 208 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
209 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 209 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -226,7 +226,7 @@ nv40_identify(struct nouveau_device *device)
226 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 226 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
227 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 227 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
228 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 228 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
229 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 229 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
230 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 230 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
231 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 231 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
232 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 232 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -249,7 +249,7 @@ nv40_identify(struct nouveau_device *device)
249 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 249 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
250 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 250 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
251 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 251 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
252 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 252 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
253 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 253 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
254 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 254 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
255 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 255 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -272,7 +272,7 @@ nv40_identify(struct nouveau_device *device)
272 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 272 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
273 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 273 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
274 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 274 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
275 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 275 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
276 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 276 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
277 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 277 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
278 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 278 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -295,7 +295,7 @@ nv40_identify(struct nouveau_device *device)
295 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 295 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
296 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 296 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
297 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 297 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
298 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 298 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
299 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 299 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
300 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 300 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
301 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 301 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -318,7 +318,7 @@ nv40_identify(struct nouveau_device *device)
318 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 318 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
319 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 319 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
320 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 320 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
321 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 321 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
322 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 322 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
323 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 323 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
324 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 324 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -341,7 +341,7 @@ nv40_identify(struct nouveau_device *device)
341 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 341 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
342 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 342 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
343 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 343 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
344 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 344 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
345 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 345 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
346 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 346 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
347 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 347 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -364,7 +364,7 @@ nv40_identify(struct nouveau_device *device)
364 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 364 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
365 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 365 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
366 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 366 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
367 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 367 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
368 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 368 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
369 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 369 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
370 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 370 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -387,7 +387,7 @@ nv40_identify(struct nouveau_device *device)
387 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 387 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
388 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 388 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
389 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 389 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
390 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 390 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
391 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 391 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
392 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 392 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
393 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 393 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -410,7 +410,7 @@ nv40_identify(struct nouveau_device *device)
410 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; 410 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
411 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 411 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
412 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 412 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
413 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 413 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
414 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; 414 device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
415 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 415 device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
416 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; 416 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index ef0b0bde1a91..932f84fae459 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -74,7 +74,7 @@ nv50_identify(struct nouveau_device *device)
74 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 74 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
75 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 75 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
76 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 76 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
77 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 77 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
78 device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass; 78 device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass;
79 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 79 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
80 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 80 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -99,7 +99,7 @@ nv50_identify(struct nouveau_device *device)
99 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 99 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
100 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 100 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
101 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 101 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
102 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 102 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
103 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 103 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
104 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 104 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
105 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 105 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -127,7 +127,7 @@ nv50_identify(struct nouveau_device *device)
127 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 127 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
128 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 128 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
129 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 129 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
130 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 130 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
131 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 131 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
132 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 132 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
133 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 133 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -155,7 +155,7 @@ nv50_identify(struct nouveau_device *device)
155 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 155 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
156 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 156 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
157 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 157 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
158 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 158 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
159 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 159 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
160 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 160 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
161 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 161 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -183,7 +183,7 @@ nv50_identify(struct nouveau_device *device)
183 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 183 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
184 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 184 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
185 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 185 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
186 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 186 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
187 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 187 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
188 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 188 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
189 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 189 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -211,7 +211,7 @@ nv50_identify(struct nouveau_device *device)
211 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 211 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
212 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 212 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
213 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 213 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
214 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 214 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
215 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 215 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
216 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 216 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
217 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 217 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -239,7 +239,7 @@ nv50_identify(struct nouveau_device *device)
239 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 239 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
240 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 240 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
241 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 241 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
242 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 242 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
243 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 243 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
244 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 244 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
245 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 245 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -267,7 +267,7 @@ nv50_identify(struct nouveau_device *device)
267 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 267 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
268 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 268 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
269 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 269 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
270 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 270 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
271 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 271 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
272 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 272 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
273 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 273 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -295,7 +295,7 @@ nv50_identify(struct nouveau_device *device)
295 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 295 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
296 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 296 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
297 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 297 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
298 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 298 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
299 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 299 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
300 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 300 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
301 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 301 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -323,7 +323,7 @@ nv50_identify(struct nouveau_device *device)
323 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 323 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
324 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 324 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
325 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 325 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
326 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 326 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
327 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 327 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
328 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 328 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
329 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 329 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -350,9 +350,9 @@ nv50_identify(struct nouveau_device *device)
350 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 350 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
351 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 351 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
352 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 352 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
353 device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; 353 device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
354 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 354 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
355 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 355 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
356 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 356 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
357 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 357 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
358 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 358 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -380,9 +380,9 @@ nv50_identify(struct nouveau_device *device)
380 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 380 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
381 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 381 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
382 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 382 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
383 device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; 383 device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
384 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 384 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
385 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 385 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
386 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 386 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
387 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 387 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
388 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 388 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -409,9 +409,9 @@ nv50_identify(struct nouveau_device *device)
409 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 409 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
410 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 410 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
411 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 411 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
412 device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; 412 device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
413 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 413 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
414 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 414 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
415 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 415 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
416 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 416 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
417 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 417 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -438,9 +438,9 @@ nv50_identify(struct nouveau_device *device)
438 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 438 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
439 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; 439 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
440 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; 440 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
441 device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; 441 device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
442 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 442 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
443 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; 443 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
444 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; 444 device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
445 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; 445 device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
446 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; 446 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d55ed633b19..b4a2917ce555 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -33,7 +33,7 @@
33#include <subdev/mc.h> 33#include <subdev/mc.h>
34#include <subdev/timer.h> 34#include <subdev/timer.h>
35#include <subdev/fb.h> 35#include <subdev/fb.h>
36#include <subdev/ltcg.h> 36#include <subdev/ltc.h>
37#include <subdev/ibus.h> 37#include <subdev/ibus.h>
38#include <subdev/instmem.h> 38#include <subdev/instmem.h>
39#include <subdev/vm.h> 39#include <subdev/vm.h>
@@ -70,14 +70,14 @@ nvc0_identify(struct nouveau_device *device)
70 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 70 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
72 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 72 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
73 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 73 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
74 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 74 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
75 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 75 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
76 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 76 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
77 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 77 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
78 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; 78 device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
79 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 79 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
80 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 80 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
81 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 81 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
82 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 82 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
83 device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass; 83 device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass;
@@ -102,14 +102,14 @@ nvc0_identify(struct nouveau_device *device)
102 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 102 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
103 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 103 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
104 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 104 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
105 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 105 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
106 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 106 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
107 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 107 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
108 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 108 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
109 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 109 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
110 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; 110 device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
111 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 111 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
112 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 112 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
113 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 113 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
114 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 114 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
115 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; 115 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -134,14 +134,14 @@ nvc0_identify(struct nouveau_device *device)
134 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 134 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
135 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 135 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
136 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 136 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
137 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 137 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
138 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 138 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
139 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 139 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
140 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 140 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
141 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 141 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
142 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; 142 device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
143 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 143 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
144 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 144 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
145 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 145 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
146 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 146 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
147 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; 147 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -165,14 +165,14 @@ nvc0_identify(struct nouveau_device *device)
165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
168 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 168 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
169 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 169 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
170 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 170 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
171 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 171 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
172 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 172 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
173 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; 173 device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
174 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 174 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
175 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 175 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
176 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 176 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
177 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 177 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
178 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; 178 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -197,14 +197,14 @@ nvc0_identify(struct nouveau_device *device)
197 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 197 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
199 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 199 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
200 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 200 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
201 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 201 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
202 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 202 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
203 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 203 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
204 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 204 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
205 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; 205 device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
206 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 206 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
207 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 207 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
208 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 208 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
209 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 209 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
210 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; 210 device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -229,14 +229,14 @@ nvc0_identify(struct nouveau_device *device)
229 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 229 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
230 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 230 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
231 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 231 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
232 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 232 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
233 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 233 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
234 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 234 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
235 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 235 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
236 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 236 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
237 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; 237 device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
238 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 238 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
239 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 239 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
240 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 240 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
241 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 241 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
242 device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass; 242 device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass;
@@ -260,14 +260,14 @@ nvc0_identify(struct nouveau_device *device)
260 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 260 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
261 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 261 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
262 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 262 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
263 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 263 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
264 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 264 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
265 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 265 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
266 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 266 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
267 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 267 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
268 device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; 268 device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
269 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 269 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
270 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 270 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
271 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 271 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
272 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 272 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
273 device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass; 273 device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass;
@@ -292,14 +292,14 @@ nvc0_identify(struct nouveau_device *device)
292 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 292 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
293 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 293 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
294 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 294 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
295 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 295 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
296 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 296 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
297 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 297 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
298 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 298 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
299 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 299 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
300 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; 300 device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
301 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 301 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
302 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 302 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
303 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 303 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
304 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 304 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
305 device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass; 305 device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass;
@@ -323,12 +323,12 @@ nvc0_identify(struct nouveau_device *device)
323 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 323 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
324 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 324 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
325 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 325 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
326 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 326 device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
327 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; 327 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
328 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 328 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
329 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 329 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
330 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 330 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
331 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 331 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
332 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; 332 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
333 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 333 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
334 device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass; 334 device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 2d1e97d4264f..54ec53bc6252 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -33,7 +33,7 @@
33#include <subdev/mc.h> 33#include <subdev/mc.h>
34#include <subdev/timer.h> 34#include <subdev/timer.h>
35#include <subdev/fb.h> 35#include <subdev/fb.h>
36#include <subdev/ltcg.h> 36#include <subdev/ltc.h>
37#include <subdev/ibus.h> 37#include <subdev/ibus.h>
38#include <subdev/instmem.h> 38#include <subdev/instmem.h>
39#include <subdev/vm.h> 39#include <subdev/vm.h>
@@ -70,14 +70,14 @@ nve0_identify(struct nouveau_device *device)
70 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 70 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
72 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; 72 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
73 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 73 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
74 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 74 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
75 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 75 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
76 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 76 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
77 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 77 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
78 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; 78 device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass;
79 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 79 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
80 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 80 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
81 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; 81 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
82 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 82 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
83 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; 83 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -103,14 +103,14 @@ nve0_identify(struct nouveau_device *device)
103 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 103 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
104 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 104 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
105 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; 105 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
106 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 106 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
107 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 107 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
108 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 108 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
109 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 109 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
110 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 110 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
111 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; 111 device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
112 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 112 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
113 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 113 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
114 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; 114 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
115 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 115 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
116 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; 116 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -136,14 +136,14 @@ nve0_identify(struct nouveau_device *device)
136 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 136 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
137 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 137 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
138 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; 138 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
139 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 139 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
140 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 140 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
141 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 141 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
142 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 142 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
143 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 143 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
144 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; 144 device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass;
145 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 145 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
146 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 146 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
147 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; 147 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
148 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 148 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
149 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass; 149 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -158,15 +158,16 @@ nve0_identify(struct nouveau_device *device)
158 break; 158 break;
159 case 0xea: 159 case 0xea:
160 device->cname = "GK20A"; 160 device->cname = "GK20A";
161 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; 161 device->oclass[NVDEV_SUBDEV_CLOCK ] = &gk20a_clock_oclass;
162 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
162 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 163 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
163 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; 164 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
164 device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass; 165 device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
165 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass; 166 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
166 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 167 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
167 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 168 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
168 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 169 device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass;
169 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 170 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
170 device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass; 171 device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass;
171 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 172 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
172 device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass; 173 device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass;
@@ -186,14 +187,14 @@ nve0_identify(struct nouveau_device *device)
186 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 187 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
187 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 188 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
188 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; 189 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
189 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 190 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
190 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 191 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
191 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 192 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
192 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 193 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
193 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 194 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
194 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; 195 device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
195 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 196 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
196 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 197 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
197 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; 198 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
198 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 199 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
199 device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass; 200 device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
@@ -219,17 +220,17 @@ nve0_identify(struct nouveau_device *device)
219 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 220 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
220 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 221 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
221 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; 222 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
222 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 223 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
223 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 224 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
224 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 225 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
225 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 226 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
226 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 227 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
227 device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass; 228 device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
228 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 229 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
229 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 230 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
230 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass; 231 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
231 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 232 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
232 device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass; 233 device->oclass[NVDEV_ENGINE_GR ] = gk110b_graph_oclass;
233 device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass; 234 device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
234 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 235 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
235 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; 236 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
@@ -248,18 +249,18 @@ nve0_identify(struct nouveau_device *device)
248 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 249 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
249 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 250 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
250 device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; 251 device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
251 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; 252 device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
252 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 253 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
253 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 254 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
254 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; 255 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
255 device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; 256 device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
256 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 257 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
257 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 258 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
258 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 259 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
259 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 260 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
260 device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass; 261 device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
261 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 262 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
262 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; 263 device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
263 device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; 264 device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
264 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 265 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
265 device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass; 266 device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
index 9c38c5e40500..22d55f6cde50 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -22,23 +22,93 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h>
26#include <nvif/unpack.h>
27#include <nvif/class.h>
28#include <nvif/event.h>
29
25#include "priv.h" 30#include "priv.h"
26#include "outp.h" 31#include "outp.h"
27#include "conn.h" 32#include "conn.h"
28 33
34int
35nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *notify)
36{
37 struct nouveau_disp *disp =
38 container_of(notify->event, typeof(*disp), vblank);
39 union {
40 struct nvif_notify_head_req_v0 v0;
41 } *req = data;
42 int ret;
43
44 if (nvif_unpack(req->v0, 0, 0, false)) {
45 notify->size = sizeof(struct nvif_notify_head_rep_v0);
46 if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
47 notify->types = 1;
48 notify->index = req->v0.head;
49 return 0;
50 }
51 }
52
53 return ret;
54}
55
56void
57nouveau_disp_vblank(struct nouveau_disp *disp, int head)
58{
59 struct nvif_notify_head_rep_v0 rep = {};
60 nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
61}
62
29static int 63static int
30nouveau_disp_hpd_check(struct nouveau_event *event, u32 types, int index) 64nouveau_disp_hpd_ctor(void *data, u32 size, struct nvkm_notify *notify)
31{ 65{
32 struct nouveau_disp *disp = event->priv; 66 struct nouveau_disp *disp =
67 container_of(notify->event, typeof(*disp), hpd);
68 union {
69 struct nvif_notify_conn_req_v0 v0;
70 } *req = data;
33 struct nvkm_output *outp; 71 struct nvkm_output *outp;
34 list_for_each_entry(outp, &disp->outp, head) { 72 int ret;
35 if (outp->conn->index == index) { 73
36 if (outp->conn->hpd.event) 74 if (nvif_unpack(req->v0, 0, 0, false)) {
37 return 0; 75 notify->size = sizeof(struct nvif_notify_conn_rep_v0);
38 break; 76 list_for_each_entry(outp, &disp->outp, head) {
77 if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
78 if (ret = -ENODEV, outp->conn->hpd.event) {
79 notify->types = req->v0.mask;
80 notify->index = req->v0.conn;
81 ret = 0;
82 }
83 break;
84 }
39 } 85 }
40 } 86 }
41 return -ENOSYS; 87
88 return ret;
89}
90
91static const struct nvkm_event_func
92nouveau_disp_hpd_func = {
93 .ctor = nouveau_disp_hpd_ctor
94};
95
96int
97nouveau_disp_ntfy(struct nouveau_object *object, u32 type,
98 struct nvkm_event **event)
99{
100 struct nouveau_disp *disp = (void *)object->engine;
101 switch (type) {
102 case NV04_DISP_NTFY_VBLANK:
103 *event = &disp->vblank;
104 return 0;
105 case NV04_DISP_NTFY_CONN:
106 *event = &disp->hpd;
107 return 0;
108 default:
109 break;
110 }
111 return -EINVAL;
42} 112}
43 113
44int 114int
@@ -97,7 +167,8 @@ _nouveau_disp_dtor(struct nouveau_object *object)
97 struct nouveau_disp *disp = (void *)object; 167 struct nouveau_disp *disp = (void *)object;
98 struct nvkm_output *outp, *outt; 168 struct nvkm_output *outp, *outt;
99 169
100 nouveau_event_destroy(&disp->vblank); 170 nvkm_event_fini(&disp->vblank);
171 nvkm_event_fini(&disp->hpd);
101 172
102 if (disp->outp.next) { 173 if (disp->outp.next) {
103 list_for_each_entry_safe(outp, outt, &disp->outp, head) { 174 list_for_each_entry_safe(outp, outt, &disp->outp, head) {
@@ -157,14 +228,11 @@ nouveau_disp_create_(struct nouveau_object *parent,
157 hpd = max(hpd, (u8)(dcbE.connector + 1)); 228 hpd = max(hpd, (u8)(dcbE.connector + 1));
158 } 229 }
159 230
160 ret = nouveau_event_create(3, hpd, &disp->hpd); 231 ret = nvkm_event_init(&nouveau_disp_hpd_func, 3, hpd, &disp->hpd);
161 if (ret) 232 if (ret)
162 return ret; 233 return ret;
163 234
164 disp->hpd->priv = disp; 235 ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank);
165 disp->hpd->check = nouveau_disp_hpd_check;
166
167 ret = nouveau_event_create(1, heads, &disp->vblank);
168 if (ret) 236 if (ret)
169 return ret; 237 return ret;
170 238
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
index 4ffbc70ecf5a..3d1070228977 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
@@ -22,39 +22,41 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h>
26#include <nvif/event.h>
27
25#include <subdev/gpio.h> 28#include <subdev/gpio.h>
26 29
27#include "conn.h" 30#include "conn.h"
28#include "outp.h" 31#include "outp.h"
29 32
30static void 33static int
31nvkm_connector_hpd_work(struct work_struct *w) 34nvkm_connector_hpd(struct nvkm_notify *notify)
32{ 35{
33 struct nvkm_connector *conn = container_of(w, typeof(*conn), hpd.work); 36 struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
34 struct nouveau_disp *disp = nouveau_disp(conn); 37 struct nouveau_disp *disp = nouveau_disp(conn);
35 struct nouveau_gpio *gpio = nouveau_gpio(conn); 38 struct nouveau_gpio *gpio = nouveau_gpio(conn);
36 u32 send = NVKM_HPD_UNPLUG; 39 const struct nvkm_gpio_ntfy_rep *line = notify->data;
37 if (gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.event->index)) 40 struct nvif_notify_conn_rep_v0 rep;
38 send = NVKM_HPD_PLUG; 41 int index = conn->index;
39 nouveau_event_trigger(disp->hpd, send, conn->index);
40 nouveau_event_get(conn->hpd.event);
41}
42 42
43static int 43 DBG("HPD: %d\n", line->mask);
44nvkm_connector_hpd(void *data, u32 type, int index) 44
45{ 45 if (!gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
46 struct nvkm_connector *conn = data; 46 rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG;
47 DBG("HPD: %d\n", type); 47 else
48 schedule_work(&conn->hpd.work); 48 rep.mask = NVIF_NOTIFY_CONN_V0_PLUG;
49 return NVKM_EVENT_DROP; 49 rep.version = 0;
50
51 nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));
52 return NVKM_NOTIFY_KEEP;
50} 53}
51 54
52int 55int
53_nvkm_connector_fini(struct nouveau_object *object, bool suspend) 56_nvkm_connector_fini(struct nouveau_object *object, bool suspend)
54{ 57{
55 struct nvkm_connector *conn = (void *)object; 58 struct nvkm_connector *conn = (void *)object;
56 if (conn->hpd.event) 59 nvkm_notify_put(&conn->hpd);
57 nouveau_event_put(conn->hpd.event);
58 return nouveau_object_fini(&conn->base, suspend); 60 return nouveau_object_fini(&conn->base, suspend);
59} 61}
60 62
@@ -63,10 +65,8 @@ _nvkm_connector_init(struct nouveau_object *object)
63{ 65{
64 struct nvkm_connector *conn = (void *)object; 66 struct nvkm_connector *conn = (void *)object;
65 int ret = nouveau_object_init(&conn->base); 67 int ret = nouveau_object_init(&conn->base);
66 if (ret == 0) { 68 if (ret == 0)
67 if (conn->hpd.event) 69 nvkm_notify_get(&conn->hpd);
68 nouveau_event_get(conn->hpd.event);
69 }
70 return ret; 70 return ret;
71} 71}
72 72
@@ -74,7 +74,7 @@ void
74_nvkm_connector_dtor(struct nouveau_object *object) 74_nvkm_connector_dtor(struct nouveau_object *object)
75{ 75{
76 struct nvkm_connector *conn = (void *)object; 76 struct nvkm_connector *conn = (void *)object;
77 nouveau_event_ref(NULL, &conn->hpd.event); 77 nvkm_notify_fini(&conn->hpd);
78 nouveau_object_destroy(&conn->base); 78 nouveau_object_destroy(&conn->base);
79} 79}
80 80
@@ -116,19 +116,24 @@ nvkm_connector_create_(struct nouveau_object *parent,
116 if ((info->hpd = ffs(info->hpd))) { 116 if ((info->hpd = ffs(info->hpd))) {
117 if (--info->hpd >= ARRAY_SIZE(hpd)) { 117 if (--info->hpd >= ARRAY_SIZE(hpd)) {
118 ERR("hpd %02x unknown\n", info->hpd); 118 ERR("hpd %02x unknown\n", info->hpd);
119 goto done; 119 return 0;
120 } 120 }
121 info->hpd = hpd[info->hpd]; 121 info->hpd = hpd[info->hpd];
122 122
123 ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func); 123 ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
124 if (ret) { 124 if (ret) {
125 ERR("func %02x lookup failed, %d\n", info->hpd, ret); 125 ERR("func %02x lookup failed, %d\n", info->hpd, ret);
126 goto done; 126 return 0;
127 } 127 }
128 128
129 ret = nouveau_event_new(gpio->events, NVKM_GPIO_TOGGLED, 129 ret = nvkm_notify_init(&gpio->event, nvkm_connector_hpd, true,
130 func.line, nvkm_connector_hpd, 130 &(struct nvkm_gpio_ntfy_req) {
131 conn, &conn->hpd.event); 131 .mask = NVKM_GPIO_TOGGLED,
132 .line = func.line,
133 },
134 sizeof(struct nvkm_gpio_ntfy_req),
135 sizeof(struct nvkm_gpio_ntfy_rep),
136 &conn->hpd);
132 if (ret) { 137 if (ret) {
133 ERR("func %02x failed, %d\n", info->hpd, ret); 138 ERR("func %02x failed, %d\n", info->hpd, ret);
134 } else { 139 } else {
@@ -136,8 +141,6 @@ nvkm_connector_create_(struct nouveau_object *parent,
136 } 141 }
137 } 142 }
138 143
139done:
140 INIT_WORK(&conn->hpd.work, nvkm_connector_hpd_work);
141 return 0; 144 return 0;
142} 145}
143 146
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
index 035ebeacbb1c..55e5f5c82c14 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
@@ -10,10 +10,7 @@ struct nvkm_connector {
10 struct nvbios_connE info; 10 struct nvbios_connE info;
11 int index; 11 int index;
12 12
13 struct { 13 struct nvkm_notify hpd;
14 struct nouveau_eventh *event;
15 struct work_struct work;
16 } hpd;
17}; 14};
18 15
19#define nvkm_connector_create(p,e,c,b,i,d) \ 16#define nvkm_connector_create(p,e,c,b,i,d) \
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index a66b27c0fcab..b36addff06a9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -22,8 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27 28
28#include <subdev/bios.h> 29#include <subdev/bios.h>
29#include <subdev/bios/dcb.h> 30#include <subdev/bios/dcb.h>
@@ -32,13 +33,28 @@
32#include "nv50.h" 33#include "nv50.h"
33 34
34int 35int
35nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data) 36nv50_dac_power(NV50_DISP_MTHD_V1)
36{ 37{
37 const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) | 38 const u32 doff = outp->or * 0x800;
38 (data & NV50_DISP_DAC_PWR_VSYNC) | 39 union {
39 (data & NV50_DISP_DAC_PWR_DATA) | 40 struct nv50_disp_dac_pwr_v0 v0;
40 (data & NV50_DISP_DAC_PWR_STATE); 41 } *args = data;
41 const u32 doff = (or * 0x800); 42 u32 stat;
43 int ret;
44
45 nv_ioctl(object, "disp dac pwr size %d\n", size);
46 if (nvif_unpack(args->v0, 0, 0, false)) {
47 nv_ioctl(object, "disp dac pwr vers %d state %d data %d "
48 "vsync %d hsync %d\n",
49 args->v0.version, args->v0.state, args->v0.data,
50 args->v0.vsync, args->v0.hsync);
51 stat = 0x00000040 * !args->v0.state;
52 stat |= 0x00000010 * !args->v0.data;
53 stat |= 0x00000004 * !args->v0.vsync;
54 stat |= 0x00000001 * !args->v0.hsync;
55 } else
56 return ret;
57
42 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); 58 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
43 nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat); 59 nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
44 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); 60 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
@@ -46,9 +62,24 @@ nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
46} 62}
47 63
48int 64int
49nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) 65nv50_dac_sense(NV50_DISP_MTHD_V1)
50{ 66{
51 const u32 doff = (or * 0x800); 67 union {
68 struct nv50_disp_dac_load_v0 v0;
69 } *args = data;
70 const u32 doff = outp->or * 0x800;
71 u32 loadval;
72 int ret;
73
74 nv_ioctl(object, "disp dac load size %d\n", size);
75 if (nvif_unpack(args->v0, 0, 0, false)) {
76 nv_ioctl(object, "disp dac load vers %d data %08x\n",
77 args->v0.version, args->v0.data);
78 if (args->v0.data & 0xfff00000)
79 return -EINVAL;
80 loadval = args->v0.data;
81 } else
82 return ret;
52 83
53 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000); 84 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
54 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); 85 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
@@ -61,38 +92,10 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
61 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000); 92 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
62 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); 93 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
63 94
64 nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval); 95 nv_debug(priv, "DAC%d sense: 0x%08x\n", outp->or, loadval);
65 if (!(loadval & 0x80000000)) 96 if (!(loadval & 0x80000000))
66 return -ETIMEDOUT; 97 return -ETIMEDOUT;
67 98
68 return (loadval & 0x38000000) >> 27; 99 args->v0.load = (loadval & 0x38000000) >> 27;
69} 100 return 0;
70
71int
72nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
73{
74 struct nv50_disp_priv *priv = (void *)object->engine;
75 const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
76 u32 *data = args;
77 int ret;
78
79 if (size < sizeof(u32))
80 return -EINVAL;
81
82 switch (mthd & ~0x3f) {
83 case NV50_DISP_DAC_PWR:
84 ret = priv->dac.power(priv, or, data[0]);
85 break;
86 case NV50_DISP_DAC_LOAD:
87 ret = priv->dac.sense(priv, or, data[0]);
88 if (ret >= 0) {
89 data[0] = ret;
90 ret = 0;
91 }
92 break;
93 default:
94 BUG_ON(1);
95 }
96
97 return ret;
98} 101}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 5a5b59b21130..39890221b91c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -30,7 +30,7 @@
30 30
31#include <engine/disp.h> 31#include <engine/disp.h>
32 32
33#include <core/class.h> 33#include <nvif/class.h>
34 34
35#include "dport.h" 35#include "dport.h"
36#include "outpdp.h" 36#include "outpdp.h"
@@ -335,7 +335,7 @@ nouveau_dp_train(struct work_struct *w)
335 int ret; 335 int ret;
336 336
337 /* bring capabilities within encoder limits */ 337 /* bring capabilities within encoder limits */
338 if (nv_mclass(disp) < NVD0_DISP_CLASS) 338 if (nv_mclass(disp) < GF110_DISP)
339 outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED; 339 outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
340 if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) { 340 if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
341 outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT; 341 outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
@@ -354,7 +354,7 @@ nouveau_dp_train(struct work_struct *w)
354 cfg--; 354 cfg--;
355 355
356 /* disable link interrupt handling during link training */ 356 /* disable link interrupt handling during link training */
357 nouveau_event_put(outp->irq); 357 nvkm_notify_put(&outp->irq);
358 358
359 /* enable down-spreading and execute pre-train script from vbios */ 359 /* enable down-spreading and execute pre-train script from vbios */
360 dp_link_train_init(dp, outp->dpcd[3] & 0x01); 360 dp_link_train_init(dp, outp->dpcd[3] & 0x01);
@@ -395,5 +395,5 @@ nouveau_dp_train(struct work_struct *w)
395 DBG("training complete\n"); 395 DBG("training complete\n");
396 atomic_set(&outp->lt.done, 1); 396 atomic_set(&outp->lt.done, 1);
397 wake_up(&outp->lt.wait); 397 wake_up(&outp->lt.wait);
398 nouveau_event_get(outp->irq); 398 nvkm_notify_get(&outp->irq);
399} 399}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
index 9fc7447fec90..d54da8b5f87e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -25,7 +25,7 @@
25#include <engine/software.h> 25#include <engine/software.h>
26#include <engine/disp.h> 26#include <engine/disp.h>
27 27
28#include <core/class.h> 28#include <nvif/class.h>
29 29
30#include "nv50.h" 30#include "nv50.h"
31 31
@@ -35,17 +35,17 @@
35 35
36static struct nouveau_oclass 36static struct nouveau_oclass
37gm107_disp_sclass[] = { 37gm107_disp_sclass[] = {
38 { GM107_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, 38 { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
39 { GM107_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, 39 { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
40 { GM107_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, 40 { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
41 { GM107_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, 41 { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
42 { GM107_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, 42 { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
43 {} 43 {}
44}; 44};
45 45
46static struct nouveau_oclass 46static struct nouveau_oclass
47gm107_disp_base_oclass[] = { 47gm107_disp_base_oclass[] = {
48 { GM107_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, 48 { GM107_DISP, &nvd0_disp_base_ofuncs },
49 {} 49 {}
50}; 50};
51 51
@@ -93,9 +93,11 @@ gm107_disp_oclass = &(struct nv50_disp_impl) {
93 .init = _nouveau_disp_init, 93 .init = _nouveau_disp_init,
94 .fini = _nouveau_disp_fini, 94 .fini = _nouveau_disp_fini,
95 }, 95 },
96 .base.vblank = &nvd0_disp_vblank_func,
96 .base.outp = nvd0_disp_outp_sclass, 97 .base.outp = nvd0_disp_outp_sclass,
97 .mthd.core = &nve0_disp_mast_mthd_chan, 98 .mthd.core = &nve0_disp_mast_mthd_chan,
98 .mthd.base = &nvd0_disp_sync_mthd_chan, 99 .mthd.base = &nvd0_disp_sync_mthd_chan,
99 .mthd.ovly = &nve0_disp_ovly_mthd_chan, 100 .mthd.ovly = &nve0_disp_ovly_mthd_chan,
100 .mthd.prev = -0x020000, 101 .mthd.prev = -0x020000,
102 .head.scanoutpos = nvd0_disp_base_scanoutpos,
101}.base.base; 103}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index a19e7d79b847..8b4e06abe533 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -22,25 +22,37 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27 28
28#include "nv50.h" 29#include "nv50.h"
29 30
30int 31int
31nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) 32nva3_hda_eld(NV50_DISP_MTHD_V1)
32{ 33{
33 const u32 soff = (or * 0x800); 34 union {
34 int i; 35 struct nv50_disp_sor_hda_eld_v0 v0;
36 } *args = data;
37 const u32 soff = outp->or * 0x800;
38 int ret, i;
35 39
36 if (data && data[0]) { 40 nv_ioctl(object, "disp sor hda eld size %d\n", size);
41 if (nvif_unpack(args->v0, 0, 0, true)) {
42 nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
43 if (size > 0x60)
44 return -E2BIG;
45 } else
46 return ret;
47
48 if (size && args->v0.data[0]) {
37 for (i = 0; i < size; i++) 49 for (i = 0; i < size; i++)
38 nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); 50 nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
39 for (; i < 0x60; i++) 51 for (; i < 0x60; i++)
40 nv_wr32(priv, 0x61c440 + soff, (i << 8)); 52 nv_wr32(priv, 0x61c440 + soff, (i << 8));
41 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); 53 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
42 } else 54 } else
43 if (data) { 55 if (size) {
44 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001); 56 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
45 } else { 57 } else {
46 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000); 58 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index 717639386ced..baf558fc12fb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -22,8 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27 28
28#include <subdev/bios.h> 29#include <subdev/bios.h>
29#include <subdev/bios/dcb.h> 30#include <subdev/bios/dcb.h>
@@ -33,19 +34,30 @@
33#include "nv50.h" 34#include "nv50.h"
34 35
35int 36int
36nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) 37nvd0_hda_eld(NV50_DISP_MTHD_V1)
37{ 38{
38 const u32 soff = (or * 0x030); 39 union {
39 int i; 40 struct nv50_disp_sor_hda_eld_v0 v0;
41 } *args = data;
42 const u32 soff = outp->or * 0x030;
43 int ret, i;
40 44
41 if (data && data[0]) { 45 nv_ioctl(object, "disp sor hda eld size %d\n", size);
46 if (nvif_unpack(args->v0, 0, 0, true)) {
47 nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
48 if (size > 0x60)
49 return -E2BIG;
50 } else
51 return ret;
52
53 if (size && args->v0.data[0]) {
42 for (i = 0; i < size; i++) 54 for (i = 0; i < size; i++)
43 nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); 55 nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
44 for (; i < 0x60; i++) 56 for (; i < 0x60; i++)
45 nv_wr32(priv, 0x10ec00 + soff, (i << 8)); 57 nv_wr32(priv, 0x10ec00 + soff, (i << 8));
46 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); 58 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
47 } else 59 } else
48 if (data) { 60 if (size) {
49 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001); 61 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
50 } else { 62 } else {
51 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000); 63 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
index 7fdade6e604d..fa276dede9cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -22,17 +22,38 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27 28
28#include "nv50.h" 29#include "nv50.h"
29 30
30int 31int
31nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) 32nv84_hdmi_ctrl(NV50_DISP_MTHD_V1)
32{ 33{
33 const u32 hoff = (head * 0x800); 34 const u32 hoff = (head * 0x800);
35 union {
36 struct nv50_disp_sor_hdmi_pwr_v0 v0;
37 } *args = data;
38 u32 ctrl;
39 int ret;
34 40
35 if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { 41 nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
42 if (nvif_unpack(args->v0, 0, 0, false)) {
43 nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
44 "max_ac_packet %d rekey %d\n",
45 args->v0.version, args->v0.state,
46 args->v0.max_ac_packet, args->v0.rekey);
47 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
48 return -EINVAL;
49 ctrl = 0x40000000 * !!args->v0.state;
50 ctrl |= args->v0.max_ac_packet << 16;
51 ctrl |= args->v0.rekey;
52 ctrl |= 0x1f000000; /* ??? */
53 } else
54 return ret;
55
56 if (!(ctrl & 0x40000000)) {
36 nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000); 57 nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
37 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); 58 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); 59 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
@@ -65,6 +86,6 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
65 nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ 86 nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
66 87
67 /* HDMI_CTRL */ 88 /* HDMI_CTRL */
68 nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */); 89 nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
69 return 0; 90 return 0;
70} 91}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
index db8c6fd46278..57eeed1d1942 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -22,17 +22,38 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27 28
28#include "nv50.h" 29#include "nv50.h"
29 30
30int 31int
31nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) 32nva3_hdmi_ctrl(NV50_DISP_MTHD_V1)
32{ 33{
33 const u32 soff = (or * 0x800); 34 const u32 soff = outp->or * 0x800;
35 union {
36 struct nv50_disp_sor_hdmi_pwr_v0 v0;
37 } *args = data;
38 u32 ctrl;
39 int ret;
34 40
35 if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { 41 nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
42 if (nvif_unpack(args->v0, 0, 0, false)) {
43 nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
44 "max_ac_packet %d rekey %d\n",
45 args->v0.version, args->v0.state,
46 args->v0.max_ac_packet, args->v0.rekey);
47 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
48 return -EINVAL;
49 ctrl = 0x40000000 * !!args->v0.state;
50 ctrl |= args->v0.max_ac_packet << 16;
51 ctrl |= args->v0.rekey;
52 ctrl |= 0x1f000000; /* ??? */
53 } else
54 return ret;
55
56 if (!(ctrl & 0x40000000)) {
36 nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000); 57 nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
37 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); 58 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); 59 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
@@ -65,6 +86,6 @@ nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
65 nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ 86 nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
66 87
67 /* HDMI_CTRL */ 88 /* HDMI_CTRL */
68 nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */); 89 nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
69 return 0; 90 return 0;
70} 91}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
index 5151bb261832..3106d295b48d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -22,17 +22,37 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27 28
28#include "nv50.h" 29#include "nv50.h"
29 30
30int 31int
31nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) 32nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1)
32{ 33{
33 const u32 hoff = (head * 0x800); 34 const u32 hoff = (head * 0x800);
35 union {
36 struct nv50_disp_sor_hdmi_pwr_v0 v0;
37 } *args = data;
38 u32 ctrl;
39 int ret;
34 40
35 if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { 41 nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
42 if (nvif_unpack(args->v0, 0, 0, false)) {
43 nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
44 "max_ac_packet %d rekey %d\n",
45 args->v0.version, args->v0.state,
46 args->v0.max_ac_packet, args->v0.rekey);
47 if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
48 return -EINVAL;
49 ctrl = 0x40000000 * !!args->v0.state;
50 ctrl |= args->v0.max_ac_packet << 16;
51 ctrl |= args->v0.rekey;
52 } else
53 return ret;
54
55 if (!(ctrl & 0x40000000)) {
36 nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000); 56 nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
37 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); 57 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); 58 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
@@ -54,7 +74,7 @@ nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
54 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001); 74 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
55 75
56 /* HDMI_CTRL */ 76 /* HDMI_CTRL */
57 nv_mask(priv, 0x616798 + hoff, 0x401f007f, data); 77 nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
58 78
59 /* NFI, audio doesn't work without it though.. */ 79 /* NFI, audio doesn't work without it though.. */
60 nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000); 80 nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index a32666ed0c47..366f315fc9a5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -24,60 +24,100 @@
24 24
25#include "priv.h" 25#include "priv.h"
26 26
27#include <core/client.h>
27#include <core/event.h> 28#include <core/event.h>
28#include <core/class.h> 29#include <nvif/unpack.h>
30#include <nvif/class.h>
29 31
30struct nv04_disp_priv { 32struct nv04_disp_priv {
31 struct nouveau_disp base; 33 struct nouveau_disp base;
32}; 34};
33 35
34static int 36static int
35nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd, 37nv04_disp_scanoutpos(struct nouveau_object *object, struct nv04_disp_priv *priv,
36 void *data, u32 size) 38 void *data, u32 size, int head)
37{ 39{
38 struct nv04_disp_priv *priv = (void *)object->engine; 40 const u32 hoff = head * 0x2000;
39 struct nv04_display_scanoutpos *args = data; 41 union {
40 const int head = (mthd & NV04_DISP_MTHD_HEAD); 42 struct nv04_disp_scanoutpos_v0 v0;
43 } *args = data;
41 u32 line; 44 u32 line;
45 int ret;
46
47 nv_ioctl(object, "disp scanoutpos size %d\n", size);
48 if (nvif_unpack(args->v0, 0, 0, false)) {
49 nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
50 args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff;
51 args->v0.vtotal = nv_rd32(priv, 0x680804 + hoff) & 0xffff;
52 args->v0.vblanke = args->v0.vtotal - 1;
53
54 args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff;
55 args->v0.htotal = nv_rd32(priv, 0x680824 + hoff) & 0xffff;
56 args->v0.hblanke = args->v0.htotal - 1;
57
58 /*
59 * If output is vga instead of digital then vtotal/htotal is
60 * invalid so we have to give up and trigger the timestamping
61 * fallback in the drm core.
62 */
63 if (!args->v0.vtotal || !args->v0.htotal)
64 return -ENOTSUPP;
65
66 args->v0.time[0] = ktime_to_ns(ktime_get());
67 line = nv_rd32(priv, 0x600868 + hoff);
68 args->v0.time[1] = ktime_to_ns(ktime_get());
69 args->v0.hline = (line & 0xffff0000) >> 16;
70 args->v0.vline = (line & 0x0000ffff);
71 } else
72 return ret;
42 73
43 if (size < sizeof(*args))
44 return -EINVAL;
45
46 args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff;
47 args->vtotal = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff;
48 args->vblanke = args->vtotal - 1;
49
50 args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff;
51 args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
52 args->hblanke = args->htotal - 1;
53
54 /*
55 * If output is vga instead of digital then vtotal/htotal is invalid
56 * so we have to give up and trigger the timestamping fallback in the
57 * drm core.
58 */
59 if (!args->vtotal || !args->htotal)
60 return -ENOTSUPP;
61
62 args->time[0] = ktime_to_ns(ktime_get());
63 line = nv_rd32(priv, 0x600868 + (head * 0x2000));
64 args->time[1] = ktime_to_ns(ktime_get());
65 args->hline = (line & 0xffff0000) >> 16;
66 args->vline = (line & 0x0000ffff);
67 return 0; 74 return 0;
68} 75}
69 76
70#define HEAD_MTHD(n) (n), (n) + 0x01 77static int
78nv04_disp_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size)
79{
80 union {
81 struct nv04_disp_mthd_v0 v0;
82 } *args = data;
83 struct nv04_disp_priv *priv = (void *)object->engine;
84 int head, ret;
85
86 nv_ioctl(object, "disp mthd size %d\n", size);
87 if (nvif_unpack(args->v0, 0, 0, true)) {
88 nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
89 args->v0.version, args->v0.method, args->v0.head);
90 mthd = args->v0.method;
91 head = args->v0.head;
92 } else
93 return ret;
71 94
72static struct nouveau_omthds 95 if (head < 0 || head >= 2)
73nv04_disp_omthds[] = { 96 return -ENXIO;
74 { HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos }, 97
75 {} 98 switch (mthd) {
99 case NV04_DISP_SCANOUTPOS:
100 return nv04_disp_scanoutpos(object, priv, data, size, head);
101 default:
102 break;
103 }
104
105 return -EINVAL;
106}
107
108static struct nouveau_ofuncs
109nv04_disp_ofuncs = {
110 .ctor = _nouveau_object_ctor,
111 .dtor = nouveau_object_destroy,
112 .init = nouveau_object_init,
113 .fini = nouveau_object_fini,
114 .mthd = nv04_disp_mthd,
115 .ntfy = nouveau_disp_ntfy,
76}; 116};
77 117
78static struct nouveau_oclass 118static struct nouveau_oclass
79nv04_disp_sclass[] = { 119nv04_disp_sclass[] = {
80 { NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds }, 120 { NV04_DISP, &nv04_disp_ofuncs },
81 {}, 121 {},
82}; 122};
83 123
@@ -86,17 +126,26 @@ nv04_disp_sclass[] = {
86 ******************************************************************************/ 126 ******************************************************************************/
87 127
88static void 128static void
89nv04_disp_vblank_enable(struct nouveau_event *event, int type, int head) 129nv04_disp_vblank_init(struct nvkm_event *event, int type, int head)
90{ 130{
91 nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001); 131 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
132 nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001);
92} 133}
93 134
94static void 135static void
95nv04_disp_vblank_disable(struct nouveau_event *event, int type, int head) 136nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head)
96{ 137{
97 nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000); 138 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
139 nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000);
98} 140}
99 141
142static const struct nvkm_event_func
143nv04_disp_vblank_func = {
144 .ctor = nouveau_disp_vblank_ctor,
145 .init = nv04_disp_vblank_init,
146 .fini = nv04_disp_vblank_fini,
147};
148
100static void 149static void
101nv04_disp_intr(struct nouveau_subdev *subdev) 150nv04_disp_intr(struct nouveau_subdev *subdev)
102{ 151{
@@ -106,12 +155,12 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
106 u32 pvideo; 155 u32 pvideo;
107 156
108 if (crtc0 & 0x00000001) { 157 if (crtc0 & 0x00000001) {
109 nouveau_event_trigger(priv->base.vblank, 1, 0); 158 nouveau_disp_vblank(&priv->base, 0);
110 nv_wr32(priv, 0x600100, 0x00000001); 159 nv_wr32(priv, 0x600100, 0x00000001);
111 } 160 }
112 161
113 if (crtc1 & 0x00000001) { 162 if (crtc1 & 0x00000001) {
114 nouveau_event_trigger(priv->base.vblank, 1, 1); 163 nouveau_disp_vblank(&priv->base, 1);
115 nv_wr32(priv, 0x602100, 0x00000001); 164 nv_wr32(priv, 0x602100, 0x00000001);
116 } 165 }
117 166
@@ -140,9 +189,6 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
140 189
141 nv_engine(priv)->sclass = nv04_disp_sclass; 190 nv_engine(priv)->sclass = nv04_disp_sclass;
142 nv_subdev(priv)->intr = nv04_disp_intr; 191 nv_subdev(priv)->intr = nv04_disp_intr;
143 priv->base.vblank->priv = priv;
144 priv->base.vblank->enable = nv04_disp_vblank_enable;
145 priv->base.vblank->disable = nv04_disp_vblank_disable;
146 return 0; 192 return 0;
147} 193}
148 194
@@ -155,4 +201,5 @@ nv04_disp_oclass = &(struct nouveau_disp_impl) {
155 .init = _nouveau_disp_init, 201 .init = _nouveau_disp_init,
156 .fini = _nouveau_disp_fini, 202 .fini = _nouveau_disp_fini,
157 }, 203 },
204 .vblank = &nv04_disp_vblank_func,
158}.base; 205}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 2283c442a10d..4b5bb5d58a54 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -23,10 +23,12 @@
23 */ 23 */
24 24
25#include <core/object.h> 25#include <core/object.h>
26#include <core/client.h>
26#include <core/parent.h> 27#include <core/parent.h>
27#include <core/handle.h> 28#include <core/handle.h>
28#include <core/class.h>
29#include <core/enum.h> 29#include <core/enum.h>
30#include <nvif/unpack.h>
31#include <nvif/class.h>
30 32
31#include <subdev/bios.h> 33#include <subdev/bios.h>
32#include <subdev/bios/dcb.h> 34#include <subdev/bios/dcb.h>
@@ -43,14 +45,16 @@
43 * EVO channel base class 45 * EVO channel base class
44 ******************************************************************************/ 46 ******************************************************************************/
45 47
46int 48static int
47nv50_disp_chan_create_(struct nouveau_object *parent, 49nv50_disp_chan_create_(struct nouveau_object *parent,
48 struct nouveau_object *engine, 50 struct nouveau_object *engine,
49 struct nouveau_oclass *oclass, int chid, 51 struct nouveau_oclass *oclass, int head,
50 int length, void **pobject) 52 int length, void **pobject)
51{ 53{
54 const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
52 struct nv50_disp_base *base = (void *)parent; 55 struct nv50_disp_base *base = (void *)parent;
53 struct nv50_disp_chan *chan; 56 struct nv50_disp_chan *chan;
57 int chid = impl->chid + head;
54 int ret; 58 int ret;
55 59
56 if (base->chan & (1 << chid)) 60 if (base->chan & (1 << chid))
@@ -63,12 +67,14 @@ nv50_disp_chan_create_(struct nouveau_object *parent,
63 chan = *pobject; 67 chan = *pobject;
64 if (ret) 68 if (ret)
65 return ret; 69 return ret;
66
67 chan->chid = chid; 70 chan->chid = chid;
71
72 nv_parent(chan)->object_attach = impl->attach;
73 nv_parent(chan)->object_detach = impl->detach;
68 return 0; 74 return 0;
69} 75}
70 76
71void 77static void
72nv50_disp_chan_destroy(struct nv50_disp_chan *chan) 78nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
73{ 79{
74 struct nv50_disp_base *base = (void *)nv_object(chan)->parent; 80 struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
@@ -76,6 +82,16 @@ nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
76 nouveau_namedb_destroy(&chan->base); 82 nouveau_namedb_destroy(&chan->base);
77} 83}
78 84
85int
86nv50_disp_chan_map(struct nouveau_object *object, u64 *addr, u32 *size)
87{
88 struct nv50_disp_chan *chan = (void *)object;
89 *addr = nv_device_resource_start(nv_device(object), 0) +
90 0x640000 + (chan->chid * 0x1000);
91 *size = 0x001000;
92 return 0;
93}
94
79u32 95u32
80nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr) 96nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
81{ 97{
@@ -115,16 +131,16 @@ nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
115 nouveau_ramht_remove(base->ramht, cookie); 131 nouveau_ramht_remove(base->ramht, cookie);
116} 132}
117 133
118int 134static int
119nv50_disp_dmac_create_(struct nouveau_object *parent, 135nv50_disp_dmac_create_(struct nouveau_object *parent,
120 struct nouveau_object *engine, 136 struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, u32 pushbuf, int chid, 137 struct nouveau_oclass *oclass, u32 pushbuf, int head,
122 int length, void **pobject) 138 int length, void **pobject)
123{ 139{
124 struct nv50_disp_dmac *dmac; 140 struct nv50_disp_dmac *dmac;
125 int ret; 141 int ret;
126 142
127 ret = nv50_disp_chan_create_(parent, engine, oclass, chid, 143 ret = nv50_disp_chan_create_(parent, engine, oclass, head,
128 length, pobject); 144 length, pobject);
129 dmac = *pobject; 145 dmac = *pobject;
130 if (ret) 146 if (ret)
@@ -397,27 +413,32 @@ nv50_disp_mast_mthd_chan = {
397 } 413 }
398}; 414};
399 415
400static int 416int
401nv50_disp_mast_ctor(struct nouveau_object *parent, 417nv50_disp_mast_ctor(struct nouveau_object *parent,
402 struct nouveau_object *engine, 418 struct nouveau_object *engine,
403 struct nouveau_oclass *oclass, void *data, u32 size, 419 struct nouveau_oclass *oclass, void *data, u32 size,
404 struct nouveau_object **pobject) 420 struct nouveau_object **pobject)
405{ 421{
406 struct nv50_display_mast_class *args = data; 422 union {
423 struct nv50_disp_core_channel_dma_v0 v0;
424 } *args = data;
407 struct nv50_disp_dmac *mast; 425 struct nv50_disp_dmac *mast;
408 int ret; 426 int ret;
409 427
410 if (size < sizeof(*args)) 428 nv_ioctl(parent, "create disp core channel dma size %d\n", size);
411 return -EINVAL; 429 if (nvif_unpack(args->v0, 0, 0, false)) {
430 nv_ioctl(parent, "create disp core channel dma vers %d "
431 "pushbuf %08x\n",
432 args->v0.version, args->v0.pushbuf);
433 } else
434 return ret;
412 435
413 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, 436 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
414 0, sizeof(*mast), (void **)&mast); 437 0, sizeof(*mast), (void **)&mast);
415 *pobject = nv_object(mast); 438 *pobject = nv_object(mast);
416 if (ret) 439 if (ret)
417 return ret; 440 return ret;
418 441
419 nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
420 nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
421 return 0; 442 return 0;
422} 443}
423 444
@@ -479,14 +500,18 @@ nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
479 return nv50_disp_chan_fini(&mast->base, suspend); 500 return nv50_disp_chan_fini(&mast->base, suspend);
480} 501}
481 502
482struct nouveau_ofuncs 503struct nv50_disp_chan_impl
483nv50_disp_mast_ofuncs = { 504nv50_disp_mast_ofuncs = {
484 .ctor = nv50_disp_mast_ctor, 505 .base.ctor = nv50_disp_mast_ctor,
485 .dtor = nv50_disp_dmac_dtor, 506 .base.dtor = nv50_disp_dmac_dtor,
486 .init = nv50_disp_mast_init, 507 .base.init = nv50_disp_mast_init,
487 .fini = nv50_disp_mast_fini, 508 .base.fini = nv50_disp_mast_fini,
488 .rd32 = nv50_disp_chan_rd32, 509 .base.map = nv50_disp_chan_map,
489 .wr32 = nv50_disp_chan_wr32, 510 .base.rd32 = nv50_disp_chan_rd32,
511 .base.wr32 = nv50_disp_chan_wr32,
512 .chid = 0,
513 .attach = nv50_disp_dmac_object_attach,
514 .detach = nv50_disp_dmac_object_detach,
490}; 515};
491 516
492/******************************************************************************* 517/*******************************************************************************
@@ -543,39 +568,51 @@ nv50_disp_sync_mthd_chan = {
543 } 568 }
544}; 569};
545 570
546static int 571int
547nv50_disp_sync_ctor(struct nouveau_object *parent, 572nv50_disp_sync_ctor(struct nouveau_object *parent,
548 struct nouveau_object *engine, 573 struct nouveau_object *engine,
549 struct nouveau_oclass *oclass, void *data, u32 size, 574 struct nouveau_oclass *oclass, void *data, u32 size,
550 struct nouveau_object **pobject) 575 struct nouveau_object **pobject)
551{ 576{
552 struct nv50_display_sync_class *args = data; 577 union {
578 struct nv50_disp_base_channel_dma_v0 v0;
579 } *args = data;
580 struct nv50_disp_priv *priv = (void *)engine;
553 struct nv50_disp_dmac *dmac; 581 struct nv50_disp_dmac *dmac;
554 int ret; 582 int ret;
555 583
556 if (size < sizeof(*args) || args->head > 1) 584 nv_ioctl(parent, "create disp base channel dma size %d\n", size);
557 return -EINVAL; 585 if (nvif_unpack(args->v0, 0, 0, false)) {
586 nv_ioctl(parent, "create disp base channel dma vers %d "
587 "pushbuf %08x head %d\n",
588 args->v0.version, args->v0.pushbuf, args->v0.head);
589 if (args->v0.head > priv->head.nr)
590 return -EINVAL;
591 } else
592 return ret;
558 593
559 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, 594 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
560 1 + args->head, sizeof(*dmac), 595 args->v0.head, sizeof(*dmac),
561 (void **)&dmac); 596 (void **)&dmac);
562 *pobject = nv_object(dmac); 597 *pobject = nv_object(dmac);
563 if (ret) 598 if (ret)
564 return ret; 599 return ret;
565 600
566 nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
567 nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
568 return 0; 601 return 0;
569} 602}
570 603
571struct nouveau_ofuncs 604struct nv50_disp_chan_impl
572nv50_disp_sync_ofuncs = { 605nv50_disp_sync_ofuncs = {
573 .ctor = nv50_disp_sync_ctor, 606 .base.ctor = nv50_disp_sync_ctor,
574 .dtor = nv50_disp_dmac_dtor, 607 .base.dtor = nv50_disp_dmac_dtor,
575 .init = nv50_disp_dmac_init, 608 .base.init = nv50_disp_dmac_init,
576 .fini = nv50_disp_dmac_fini, 609 .base.fini = nv50_disp_dmac_fini,
577 .rd32 = nv50_disp_chan_rd32, 610 .base.map = nv50_disp_chan_map,
578 .wr32 = nv50_disp_chan_wr32, 611 .base.rd32 = nv50_disp_chan_rd32,
612 .base.wr32 = nv50_disp_chan_wr32,
613 .chid = 1,
614 .attach = nv50_disp_dmac_object_attach,
615 .detach = nv50_disp_dmac_object_detach,
579}; 616};
580 617
581/******************************************************************************* 618/*******************************************************************************
@@ -620,39 +657,51 @@ nv50_disp_ovly_mthd_chan = {
620 } 657 }
621}; 658};
622 659
623static int 660int
624nv50_disp_ovly_ctor(struct nouveau_object *parent, 661nv50_disp_ovly_ctor(struct nouveau_object *parent,
625 struct nouveau_object *engine, 662 struct nouveau_object *engine,
626 struct nouveau_oclass *oclass, void *data, u32 size, 663 struct nouveau_oclass *oclass, void *data, u32 size,
627 struct nouveau_object **pobject) 664 struct nouveau_object **pobject)
628{ 665{
629 struct nv50_display_ovly_class *args = data; 666 union {
667 struct nv50_disp_overlay_channel_dma_v0 v0;
668 } *args = data;
669 struct nv50_disp_priv *priv = (void *)engine;
630 struct nv50_disp_dmac *dmac; 670 struct nv50_disp_dmac *dmac;
631 int ret; 671 int ret;
632 672
633 if (size < sizeof(*args) || args->head > 1) 673 nv_ioctl(parent, "create disp overlay channel dma size %d\n", size);
634 return -EINVAL; 674 if (nvif_unpack(args->v0, 0, 0, false)) {
675 nv_ioctl(parent, "create disp overlay channel dma vers %d "
676 "pushbuf %08x head %d\n",
677 args->v0.version, args->v0.pushbuf, args->v0.head);
678 if (args->v0.head > priv->head.nr)
679 return -EINVAL;
680 } else
681 return ret;
635 682
636 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, 683 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
637 3 + args->head, sizeof(*dmac), 684 args->v0.head, sizeof(*dmac),
638 (void **)&dmac); 685 (void **)&dmac);
639 *pobject = nv_object(dmac); 686 *pobject = nv_object(dmac);
640 if (ret) 687 if (ret)
641 return ret; 688 return ret;
642 689
643 nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
644 nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
645 return 0; 690 return 0;
646} 691}
647 692
648struct nouveau_ofuncs 693struct nv50_disp_chan_impl
649nv50_disp_ovly_ofuncs = { 694nv50_disp_ovly_ofuncs = {
650 .ctor = nv50_disp_ovly_ctor, 695 .base.ctor = nv50_disp_ovly_ctor,
651 .dtor = nv50_disp_dmac_dtor, 696 .base.dtor = nv50_disp_dmac_dtor,
652 .init = nv50_disp_dmac_init, 697 .base.init = nv50_disp_dmac_init,
653 .fini = nv50_disp_dmac_fini, 698 .base.fini = nv50_disp_dmac_fini,
654 .rd32 = nv50_disp_chan_rd32, 699 .base.map = nv50_disp_chan_map,
655 .wr32 = nv50_disp_chan_wr32, 700 .base.rd32 = nv50_disp_chan_rd32,
701 .base.wr32 = nv50_disp_chan_wr32,
702 .chid = 3,
703 .attach = nv50_disp_dmac_object_attach,
704 .detach = nv50_disp_dmac_object_detach,
656}; 705};
657 706
658/******************************************************************************* 707/*******************************************************************************
@@ -662,14 +711,14 @@ nv50_disp_ovly_ofuncs = {
662static int 711static int
663nv50_disp_pioc_create_(struct nouveau_object *parent, 712nv50_disp_pioc_create_(struct nouveau_object *parent,
664 struct nouveau_object *engine, 713 struct nouveau_object *engine,
665 struct nouveau_oclass *oclass, int chid, 714 struct nouveau_oclass *oclass, int head,
666 int length, void **pobject) 715 int length, void **pobject)
667{ 716{
668 return nv50_disp_chan_create_(parent, engine, oclass, chid, 717 return nv50_disp_chan_create_(parent, engine, oclass, head,
669 length, pobject); 718 length, pobject);
670} 719}
671 720
672static void 721void
673nv50_disp_pioc_dtor(struct nouveau_object *object) 722nv50_disp_pioc_dtor(struct nouveau_object *object)
674{ 723{
675 struct nv50_disp_pioc *pioc = (void *)object; 724 struct nv50_disp_pioc *pioc = (void *)object;
@@ -727,20 +776,29 @@ nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
727 * EVO immediate overlay channel objects 776 * EVO immediate overlay channel objects
728 ******************************************************************************/ 777 ******************************************************************************/
729 778
730static int 779int
731nv50_disp_oimm_ctor(struct nouveau_object *parent, 780nv50_disp_oimm_ctor(struct nouveau_object *parent,
732 struct nouveau_object *engine, 781 struct nouveau_object *engine,
733 struct nouveau_oclass *oclass, void *data, u32 size, 782 struct nouveau_oclass *oclass, void *data, u32 size,
734 struct nouveau_object **pobject) 783 struct nouveau_object **pobject)
735{ 784{
736 struct nv50_display_oimm_class *args = data; 785 union {
786 struct nv50_disp_overlay_v0 v0;
787 } *args = data;
788 struct nv50_disp_priv *priv = (void *)engine;
737 struct nv50_disp_pioc *pioc; 789 struct nv50_disp_pioc *pioc;
738 int ret; 790 int ret;
739 791
740 if (size < sizeof(*args) || args->head > 1) 792 nv_ioctl(parent, "create disp overlay size %d\n", size);
741 return -EINVAL; 793 if (nvif_unpack(args->v0, 0, 0, false)) {
794 nv_ioctl(parent, "create disp overlay vers %d head %d\n",
795 args->v0.version, args->v0.head);
796 if (args->v0.head > priv->head.nr)
797 return -EINVAL;
798 } else
799 return ret;
742 800
743 ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head, 801 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
744 sizeof(*pioc), (void **)&pioc); 802 sizeof(*pioc), (void **)&pioc);
745 *pobject = nv_object(pioc); 803 *pobject = nv_object(pioc);
746 if (ret) 804 if (ret)
@@ -749,34 +807,45 @@ nv50_disp_oimm_ctor(struct nouveau_object *parent,
749 return 0; 807 return 0;
750} 808}
751 809
752struct nouveau_ofuncs 810struct nv50_disp_chan_impl
753nv50_disp_oimm_ofuncs = { 811nv50_disp_oimm_ofuncs = {
754 .ctor = nv50_disp_oimm_ctor, 812 .base.ctor = nv50_disp_oimm_ctor,
755 .dtor = nv50_disp_pioc_dtor, 813 .base.dtor = nv50_disp_pioc_dtor,
756 .init = nv50_disp_pioc_init, 814 .base.init = nv50_disp_pioc_init,
757 .fini = nv50_disp_pioc_fini, 815 .base.fini = nv50_disp_pioc_fini,
758 .rd32 = nv50_disp_chan_rd32, 816 .base.map = nv50_disp_chan_map,
759 .wr32 = nv50_disp_chan_wr32, 817 .base.rd32 = nv50_disp_chan_rd32,
818 .base.wr32 = nv50_disp_chan_wr32,
819 .chid = 5,
760}; 820};
761 821
762/******************************************************************************* 822/*******************************************************************************
763 * EVO cursor channel objects 823 * EVO cursor channel objects
764 ******************************************************************************/ 824 ******************************************************************************/
765 825
766static int 826int
767nv50_disp_curs_ctor(struct nouveau_object *parent, 827nv50_disp_curs_ctor(struct nouveau_object *parent,
768 struct nouveau_object *engine, 828 struct nouveau_object *engine,
769 struct nouveau_oclass *oclass, void *data, u32 size, 829 struct nouveau_oclass *oclass, void *data, u32 size,
770 struct nouveau_object **pobject) 830 struct nouveau_object **pobject)
771{ 831{
772 struct nv50_display_curs_class *args = data; 832 union {
833 struct nv50_disp_cursor_v0 v0;
834 } *args = data;
835 struct nv50_disp_priv *priv = (void *)engine;
773 struct nv50_disp_pioc *pioc; 836 struct nv50_disp_pioc *pioc;
774 int ret; 837 int ret;
775 838
776 if (size < sizeof(*args) || args->head > 1) 839 nv_ioctl(parent, "create disp cursor size %d\n", size);
777 return -EINVAL; 840 if (nvif_unpack(args->v0, 0, 0, false)) {
841 nv_ioctl(parent, "create disp cursor vers %d head %d\n",
842 args->v0.version, args->v0.head);
843 if (args->v0.head > priv->head.nr)
844 return -EINVAL;
845 } else
846 return ret;
778 847
779 ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head, 848 ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
780 sizeof(*pioc), (void **)&pioc); 849 sizeof(*pioc), (void **)&pioc);
781 *pobject = nv_object(pioc); 850 *pobject = nv_object(pioc);
782 if (ret) 851 if (ret)
@@ -785,14 +854,16 @@ nv50_disp_curs_ctor(struct nouveau_object *parent,
785 return 0; 854 return 0;
786} 855}
787 856
788struct nouveau_ofuncs 857struct nv50_disp_chan_impl
789nv50_disp_curs_ofuncs = { 858nv50_disp_curs_ofuncs = {
790 .ctor = nv50_disp_curs_ctor, 859 .base.ctor = nv50_disp_curs_ctor,
791 .dtor = nv50_disp_pioc_dtor, 860 .base.dtor = nv50_disp_pioc_dtor,
792 .init = nv50_disp_pioc_init, 861 .base.init = nv50_disp_pioc_init,
793 .fini = nv50_disp_pioc_fini, 862 .base.fini = nv50_disp_pioc_fini,
794 .rd32 = nv50_disp_chan_rd32, 863 .base.map = nv50_disp_chan_map,
795 .wr32 = nv50_disp_chan_wr32, 864 .base.rd32 = nv50_disp_chan_rd32,
865 .base.wr32 = nv50_disp_chan_wr32,
866 .chid = 7,
796}; 867};
797 868
798/******************************************************************************* 869/*******************************************************************************
@@ -800,47 +871,162 @@ nv50_disp_curs_ofuncs = {
800 ******************************************************************************/ 871 ******************************************************************************/
801 872
802int 873int
803nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd, 874nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
804 void *data, u32 size)
805{ 875{
806 struct nv50_disp_priv *priv = (void *)object->engine; 876 const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
807 struct nv04_display_scanoutpos *args = data; 877 const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
808 const int head = (mthd & NV50_DISP_MTHD_HEAD); 878 const u32 total = nv_rd32(priv, 0x610afc + (head * 0x540));
809 u32 blanke, blanks, total; 879 union {
880 struct nv04_disp_scanoutpos_v0 v0;
881 } *args = data;
882 int ret;
883
884 nv_ioctl(object, "disp scanoutpos size %d\n", size);
885 if (nvif_unpack(args->v0, 0, 0, false)) {
886 nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
887 args->v0.vblanke = (blanke & 0xffff0000) >> 16;
888 args->v0.hblanke = (blanke & 0x0000ffff);
889 args->v0.vblanks = (blanks & 0xffff0000) >> 16;
890 args->v0.hblanks = (blanks & 0x0000ffff);
891 args->v0.vtotal = ( total & 0xffff0000) >> 16;
892 args->v0.htotal = ( total & 0x0000ffff);
893 args->v0.time[0] = ktime_to_ns(ktime_get());
894 args->v0.vline = /* vline read locks hline */
895 nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
896 args->v0.time[1] = ktime_to_ns(ktime_get());
897 args->v0.hline =
898 nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
899 } else
900 return ret;
810 901
811 if (size < sizeof(*args) || head >= priv->head.nr)
812 return -EINVAL;
813 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
814 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
815 total = nv_rd32(priv, 0x610afc + (head * 0x540));
816
817 args->vblanke = (blanke & 0xffff0000) >> 16;
818 args->hblanke = (blanke & 0x0000ffff);
819 args->vblanks = (blanks & 0xffff0000) >> 16;
820 args->hblanks = (blanks & 0x0000ffff);
821 args->vtotal = ( total & 0xffff0000) >> 16;
822 args->htotal = ( total & 0x0000ffff);
823
824 args->time[0] = ktime_to_ns(ktime_get());
825 args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
826 args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
827 args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
828 return 0; 902 return 0;
829} 903}
830 904
831static void 905int
832nv50_disp_base_vblank_enable(struct nouveau_event *event, int type, int head) 906nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
907 void *data, u32 size)
833{ 908{
834 nv_mask(event->priv, 0x61002c, (4 << head), (4 << head)); 909 const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
835} 910 union {
911 struct nv50_disp_mthd_v0 v0;
912 struct nv50_disp_mthd_v1 v1;
913 } *args = data;
914 struct nv50_disp_priv *priv = (void *)object->engine;
915 struct nvkm_output *outp = NULL;
916 struct nvkm_output *temp;
917 u16 type, mask = 0;
918 int head, ret;
836 919
837static void 920 if (mthd != NV50_DISP_MTHD)
838nv50_disp_base_vblank_disable(struct nouveau_event *event, int type, int head) 921 return -EINVAL;
839{ 922
840 nv_mask(event->priv, 0x61002c, (4 << head), 0); 923 nv_ioctl(object, "disp mthd size %d\n", size);
924 if (nvif_unpack(args->v0, 0, 0, true)) {
925 nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
926 args->v0.version, args->v0.method, args->v0.head);
927 mthd = args->v0.method;
928 head = args->v0.head;
929 } else
930 if (nvif_unpack(args->v1, 1, 1, true)) {
931 nv_ioctl(object, "disp mthd vers %d mthd %02x "
932 "type %04x mask %04x\n",
933 args->v1.version, args->v1.method,
934 args->v1.hasht, args->v1.hashm);
935 mthd = args->v1.method;
936 type = args->v1.hasht;
937 mask = args->v1.hashm;
938 head = ffs((mask >> 8) & 0x0f) - 1;
939 } else
940 return ret;
941
942 if (head < 0 || head >= priv->head.nr)
943 return -ENXIO;
944
945 if (mask) {
946 list_for_each_entry(temp, &priv->base.outp, head) {
947 if ((temp->info.hasht == type) &&
948 (temp->info.hashm & mask) == mask) {
949 outp = temp;
950 break;
951 }
952 }
953 if (outp == NULL)
954 return -ENXIO;
955 }
956
957 switch (mthd) {
958 case NV50_DISP_SCANOUTPOS:
959 return impl->head.scanoutpos(object, priv, data, size, head);
960 default:
961 break;
962 }
963
964 switch (mthd * !!outp) {
965 case NV50_DISP_MTHD_V1_DAC_PWR:
966 return priv->dac.power(object, priv, data, size, head, outp);
967 case NV50_DISP_MTHD_V1_DAC_LOAD:
968 return priv->dac.sense(object, priv, data, size, head, outp);
969 case NV50_DISP_MTHD_V1_SOR_PWR:
970 return priv->sor.power(object, priv, data, size, head, outp);
971 case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
972 if (!priv->sor.hda_eld)
973 return -ENODEV;
974 return priv->sor.hda_eld(object, priv, data, size, head, outp);
975 case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
976 if (!priv->sor.hdmi)
977 return -ENODEV;
978 return priv->sor.hdmi(object, priv, data, size, head, outp);
979 case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
980 union {
981 struct nv50_disp_sor_lvds_script_v0 v0;
982 } *args = data;
983 nv_ioctl(object, "disp sor lvds script size %d\n", size);
984 if (nvif_unpack(args->v0, 0, 0, false)) {
985 nv_ioctl(object, "disp sor lvds script "
986 "vers %d name %04x\n",
987 args->v0.version, args->v0.script);
988 priv->sor.lvdsconf = args->v0.script;
989 return 0;
990 } else
991 return ret;
992 }
993 break;
994 case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
995 struct nvkm_output_dp *outpdp = (void *)outp;
996 union {
997 struct nv50_disp_sor_dp_pwr_v0 v0;
998 } *args = data;
999 nv_ioctl(object, "disp sor dp pwr size %d\n", size);
1000 if (nvif_unpack(args->v0, 0, 0, false)) {
1001 nv_ioctl(object, "disp sor dp pwr vers %d state %d\n",
1002 args->v0.version, args->v0.state);
1003 if (args->v0.state == 0) {
1004 nvkm_notify_put(&outpdp->irq);
1005 ((struct nvkm_output_dp_impl *)nv_oclass(outp))
1006 ->lnk_pwr(outpdp, 0);
1007 atomic_set(&outpdp->lt.done, 0);
1008 return 0;
1009 } else
1010 if (args->v0.state != 0) {
1011 nvkm_output_dp_train(&outpdp->base, 0, true);
1012 return 0;
1013 }
1014 } else
1015 return ret;
1016 }
1017 break;
1018 case NV50_DISP_MTHD_V1_PIOR_PWR:
1019 if (!priv->pior.power)
1020 return -ENODEV;
1021 return priv->pior.power(object, priv, data, size, head, outp);
1022 default:
1023 break;
1024 }
1025
1026 return -EINVAL;
841} 1027}
842 1028
843static int 1029int
844nv50_disp_base_ctor(struct nouveau_object *parent, 1030nv50_disp_base_ctor(struct nouveau_object *parent,
845 struct nouveau_object *engine, 1031 struct nouveau_object *engine,
846 struct nouveau_oclass *oclass, void *data, u32 size, 1032 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -856,14 +1042,11 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
856 if (ret) 1042 if (ret)
857 return ret; 1043 return ret;
858 1044
859 priv->base.vblank->priv = priv;
860 priv->base.vblank->enable = nv50_disp_base_vblank_enable;
861 priv->base.vblank->disable = nv50_disp_base_vblank_disable;
862 return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0, 1045 return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
863 &base->ramht); 1046 &base->ramht);
864} 1047}
865 1048
866static void 1049void
867nv50_disp_base_dtor(struct nouveau_object *object) 1050nv50_disp_base_dtor(struct nouveau_object *object)
868{ 1051{
869 struct nv50_disp_base *base = (void *)object; 1052 struct nv50_disp_base *base = (void *)object;
@@ -958,34 +1141,23 @@ nv50_disp_base_ofuncs = {
958 .dtor = nv50_disp_base_dtor, 1141 .dtor = nv50_disp_base_dtor,
959 .init = nv50_disp_base_init, 1142 .init = nv50_disp_base_init,
960 .fini = nv50_disp_base_fini, 1143 .fini = nv50_disp_base_fini,
961}; 1144 .mthd = nv50_disp_base_mthd,
962 1145 .ntfy = nouveau_disp_ntfy,
963static struct nouveau_omthds
964nv50_disp_base_omthds[] = {
965 { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
966 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
967 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
968 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
969 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
970 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
971 { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
972 { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
973 {},
974}; 1146};
975 1147
976static struct nouveau_oclass 1148static struct nouveau_oclass
977nv50_disp_base_oclass[] = { 1149nv50_disp_base_oclass[] = {
978 { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds }, 1150 { NV50_DISP, &nv50_disp_base_ofuncs },
979 {} 1151 {}
980}; 1152};
981 1153
982static struct nouveau_oclass 1154static struct nouveau_oclass
983nv50_disp_sclass[] = { 1155nv50_disp_sclass[] = {
984 { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, 1156 { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
985 { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, 1157 { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
986 { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, 1158 { NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
987 { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, 1159 { NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
988 { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, 1160 { NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
989 {} 1161 {}
990}; 1162};
991 1163
@@ -1005,7 +1177,7 @@ nv50_disp_data_ctor(struct nouveau_object *parent,
1005 int ret = -EBUSY; 1177 int ret = -EBUSY;
1006 1178
1007 /* no context needed for channel objects... */ 1179 /* no context needed for channel objects... */
1008 if (nv_mclass(parent) != NV_DEVICE_CLASS) { 1180 if (nv_mclass(parent) != NV_DEVICE) {
1009 atomic_inc(&parent->refcount); 1181 atomic_inc(&parent->refcount);
1010 *pobject = parent; 1182 *pobject = parent;
1011 return 1; 1183 return 1;
@@ -1040,6 +1212,27 @@ nv50_disp_cclass = {
1040 * Display engine implementation 1212 * Display engine implementation
1041 ******************************************************************************/ 1213 ******************************************************************************/
1042 1214
1215static void
1216nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
1217{
1218 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
1219 nv_mask(disp, 0x61002c, (4 << head), 0);
1220}
1221
1222static void
1223nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
1224{
1225 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
1226 nv_mask(disp, 0x61002c, (4 << head), (4 << head));
1227}
1228
1229const struct nvkm_event_func
1230nv50_disp_vblank_func = {
1231 .ctor = nouveau_disp_vblank_ctor,
1232 .init = nv50_disp_vblank_init,
1233 .fini = nv50_disp_vblank_fini,
1234};
1235
1043static const struct nouveau_enum 1236static const struct nouveau_enum
1044nv50_disp_intr_error_type[] = { 1237nv50_disp_intr_error_type[] = {
1045 { 3, "ILLEGAL_MTHD" }, 1238 { 3, "ILLEGAL_MTHD" },
@@ -1381,7 +1574,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
1381 int TU, VTUi, VTUf, VTUa; 1574 int TU, VTUi, VTUf, VTUa;
1382 u64 link_data_rate, link_ratio, unk; 1575 u64 link_data_rate, link_ratio, unk;
1383 u32 best_diff = 64 * symbol; 1576 u32 best_diff = 64 * symbol;
1384 u32 link_nr, link_bw, bits, r; 1577 u32 link_nr, link_bw, bits;
1385 1578
1386 /* calculate packed data rate for each lane */ 1579 /* calculate packed data rate for each lane */
1387 if (dpctrl > 0x00030000) link_nr = 4; 1580 if (dpctrl > 0x00030000) link_nr = 4;
@@ -1401,7 +1594,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
1401 1594
1402 /* calculate ratio of packed data rate to link symbol rate */ 1595 /* calculate ratio of packed data rate to link symbol rate */
1403 link_ratio = link_data_rate * symbol; 1596 link_ratio = link_data_rate * symbol;
1404 r = do_div(link_ratio, link_bw); 1597 do_div(link_ratio, link_bw);
1405 1598
1406 for (TU = 64; TU >= 32; TU--) { 1599 for (TU = 64; TU >= 32; TU--) {
1407 /* calculate average number of valid symbols in each TU */ 1600 /* calculate average number of valid symbols in each TU */
@@ -1462,8 +1655,8 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
1462 /* XXX close to vbios numbers, but not right */ 1655 /* XXX close to vbios numbers, but not right */
1463 unk = (symbol - link_ratio) * bestTU; 1656 unk = (symbol - link_ratio) * bestTU;
1464 unk *= link_ratio; 1657 unk *= link_ratio;
1465 r = do_div(unk, symbol); 1658 do_div(unk, symbol);
1466 r = do_div(unk, symbol); 1659 do_div(unk, symbol);
1467 unk += 6; 1660 unk += 6;
1468 1661
1469 nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2); 1662 nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
@@ -1654,13 +1847,13 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
1654 } 1847 }
1655 1848
1656 if (intr1 & 0x00000004) { 1849 if (intr1 & 0x00000004) {
1657 nouveau_event_trigger(priv->base.vblank, 1, 0); 1850 nouveau_disp_vblank(&priv->base, 0);
1658 nv_wr32(priv, 0x610024, 0x00000004); 1851 nv_wr32(priv, 0x610024, 0x00000004);
1659 intr1 &= ~0x00000004; 1852 intr1 &= ~0x00000004;
1660 } 1853 }
1661 1854
1662 if (intr1 & 0x00000008) { 1855 if (intr1 & 0x00000008) {
1663 nouveau_event_trigger(priv->base.vblank, 1, 1); 1856 nouveau_disp_vblank(&priv->base, 1);
1664 nv_wr32(priv, 0x610024, 0x00000008); 1857 nv_wr32(priv, 0x610024, 0x00000008);
1665 intr1 &= ~0x00000008; 1858 intr1 &= ~0x00000008;
1666 } 1859 }
@@ -1718,9 +1911,11 @@ nv50_disp_oclass = &(struct nv50_disp_impl) {
1718 .init = _nouveau_disp_init, 1911 .init = _nouveau_disp_init,
1719 .fini = _nouveau_disp_fini, 1912 .fini = _nouveau_disp_fini,
1720 }, 1913 },
1914 .base.vblank = &nv50_disp_vblank_func,
1721 .base.outp = nv50_disp_outp_sclass, 1915 .base.outp = nv50_disp_outp_sclass,
1722 .mthd.core = &nv50_disp_mast_mthd_chan, 1916 .mthd.core = &nv50_disp_mast_mthd_chan,
1723 .mthd.base = &nv50_disp_sync_mthd_chan, 1917 .mthd.base = &nv50_disp_sync_mthd_chan,
1724 .mthd.ovly = &nv50_disp_ovly_mthd_chan, 1918 .mthd.ovly = &nv50_disp_ovly_mthd_chan,
1725 .mthd.prev = 0x000004, 1919 .mthd.prev = 0x000004,
1920 .head.scanoutpos = nv50_disp_base_scanoutpos,
1726}.base.base; 1921}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 1a886472b6f5..8ab14461f70c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -14,15 +14,10 @@
14#include "outp.h" 14#include "outp.h"
15#include "outpdp.h" 15#include "outpdp.h"
16 16
17struct nv50_disp_impl { 17#define NV50_DISP_MTHD_ struct nouveau_object *object, \
18 struct nouveau_disp_impl base; 18 struct nv50_disp_priv *priv, void *data, u32 size
19 struct { 19#define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
20 const struct nv50_disp_mthd_chan *core; 20#define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
21 const struct nv50_disp_mthd_chan *base;
22 const struct nv50_disp_mthd_chan *ovly;
23 int prev;
24 } mthd;
25};
26 21
27struct nv50_disp_priv { 22struct nv50_disp_priv {
28 struct nouveau_disp base; 23 struct nouveau_disp base;
@@ -36,44 +31,52 @@ struct nv50_disp_priv {
36 } head; 31 } head;
37 struct { 32 struct {
38 int nr; 33 int nr;
39 int (*power)(struct nv50_disp_priv *, int dac, u32 data); 34 int (*power)(NV50_DISP_MTHD_V1);
40 int (*sense)(struct nv50_disp_priv *, int dac, u32 load); 35 int (*sense)(NV50_DISP_MTHD_V1);
41 } dac; 36 } dac;
42 struct { 37 struct {
43 int nr; 38 int nr;
44 int (*power)(struct nv50_disp_priv *, int sor, u32 data); 39 int (*power)(NV50_DISP_MTHD_V1);
45 int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32); 40 int (*hda_eld)(NV50_DISP_MTHD_V1);
46 int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32); 41 int (*hdmi)(NV50_DISP_MTHD_V1);
47 u32 lvdsconf; 42 u32 lvdsconf;
48 } sor; 43 } sor;
49 struct { 44 struct {
50 int nr; 45 int nr;
51 int (*power)(struct nv50_disp_priv *, int ext, u32 data); 46 int (*power)(NV50_DISP_MTHD_V1);
52 u8 type[3]; 47 u8 type[3];
53 } pior; 48 } pior;
54}; 49};
55 50
56#define HEAD_MTHD(n) (n), (n) + 0x03 51struct nv50_disp_impl {
57 52 struct nouveau_disp_impl base;
58int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32); 53 struct {
54 const struct nv50_disp_mthd_chan *core;
55 const struct nv50_disp_mthd_chan *base;
56 const struct nv50_disp_mthd_chan *ovly;
57 int prev;
58 } mthd;
59 struct {
60 int (*scanoutpos)(NV50_DISP_MTHD_V0);
61 } head;
62};
59 63
60#define DAC_MTHD(n) (n), (n) + 0x03 64int nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
65int nv50_disp_base_mthd(struct nouveau_object *, u32, void *, u32);
61 66
62int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32); 67int nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
63int nv50_dac_power(struct nv50_disp_priv *, int, u32);
64int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
65 68
66#define SOR_MTHD(n) (n), (n) + 0x3f 69int nv50_dac_power(NV50_DISP_MTHD_V1);
70int nv50_dac_sense(NV50_DISP_MTHD_V1);
67 71
68int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); 72int nva3_hda_eld(NV50_DISP_MTHD_V1);
69int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); 73int nvd0_hda_eld(NV50_DISP_MTHD_V1);
70 74
71int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); 75int nv84_hdmi_ctrl(NV50_DISP_MTHD_V1);
72int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); 76int nva3_hdmi_ctrl(NV50_DISP_MTHD_V1);
73int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); 77int nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1);
74 78
75int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32); 79int nv50_sor_power(NV50_DISP_MTHD_V1);
76int nv50_sor_power(struct nv50_disp_priv *, int, u32);
77 80
78int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16, 81int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
79 u32, struct dcb_output *); 82 u32, struct dcb_output *);
@@ -93,10 +96,7 @@ int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
93int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, 96int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
94 struct dcb_output *); 97 struct dcb_output *);
95 98
96#define PIOR_MTHD(n) (n), (n) + 0x03 99int nv50_pior_power(NV50_DISP_MTHD_V1);
97
98int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32);
99int nv50_pior_power(struct nv50_disp_priv *, int, u32);
100 100
101struct nv50_disp_base { 101struct nv50_disp_base {
102 struct nouveau_parent base; 102 struct nouveau_parent base;
@@ -104,14 +104,19 @@ struct nv50_disp_base {
104 u32 chan; 104 u32 chan;
105}; 105};
106 106
107struct nv50_disp_chan_impl {
108 struct nouveau_ofuncs base;
109 int chid;
110 int (*attach)(struct nouveau_object *, struct nouveau_object *, u32);
111 void (*detach)(struct nouveau_object *, int);
112};
113
107struct nv50_disp_chan { 114struct nv50_disp_chan {
108 struct nouveau_namedb base; 115 struct nouveau_namedb base;
109 int chid; 116 int chid;
110}; 117};
111 118
112int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *, 119int nv50_disp_chan_map(struct nouveau_object *, u64 *, u32 *);
113 struct nouveau_oclass *, int, int, void **);
114void nv50_disp_chan_destroy(struct nv50_disp_chan *);
115u32 nv50_disp_chan_rd32(struct nouveau_object *, u64); 120u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
116void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32); 121void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
117 122
@@ -120,20 +125,20 @@ void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
120#define nv50_disp_chan_fini(a,b) \ 125#define nv50_disp_chan_fini(a,b) \
121 nouveau_namedb_fini(&(a)->base, (b)) 126 nouveau_namedb_fini(&(a)->base, (b))
122 127
123int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
124 struct nouveau_oclass *, u32, int, int, void **);
125void nv50_disp_dmac_dtor(struct nouveau_object *);
126
127struct nv50_disp_dmac { 128struct nv50_disp_dmac {
128 struct nv50_disp_chan base; 129 struct nv50_disp_chan base;
129 struct nouveau_dmaobj *pushdma; 130 struct nouveau_dmaobj *pushdma;
130 u32 push; 131 u32 push;
131}; 132};
132 133
134void nv50_disp_dmac_dtor(struct nouveau_object *);
135
133struct nv50_disp_pioc { 136struct nv50_disp_pioc {
134 struct nv50_disp_chan base; 137 struct nv50_disp_chan base;
135}; 138};
136 139
140void nv50_disp_pioc_dtor(struct nouveau_object *);
141
137struct nv50_disp_mthd_list { 142struct nv50_disp_mthd_list {
138 u32 mthd; 143 u32 mthd;
139 u32 addr; 144 u32 addr;
@@ -154,47 +159,67 @@ struct nv50_disp_mthd_chan {
154 } data[]; 159 } data[];
155}; 160};
156 161
157extern struct nouveau_ofuncs nv50_disp_mast_ofuncs; 162extern struct nv50_disp_chan_impl nv50_disp_mast_ofuncs;
163int nv50_disp_mast_ctor(struct nouveau_object *, struct nouveau_object *,
164 struct nouveau_oclass *, void *, u32,
165 struct nouveau_object **);
158extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base; 166extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base;
159extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor; 167extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor;
160extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior; 168extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior;
161extern struct nouveau_ofuncs nv50_disp_sync_ofuncs; 169extern struct nv50_disp_chan_impl nv50_disp_sync_ofuncs;
170int nv50_disp_sync_ctor(struct nouveau_object *, struct nouveau_object *,
171 struct nouveau_oclass *, void *, u32,
172 struct nouveau_object **);
162extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image; 173extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image;
163extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs; 174extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
175int nv50_disp_ovly_ctor(struct nouveau_object *, struct nouveau_object *,
176 struct nouveau_oclass *, void *, u32,
177 struct nouveau_object **);
164extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base; 178extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
165extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs; 179extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs;
166extern struct nouveau_ofuncs nv50_disp_curs_ofuncs; 180int nv50_disp_oimm_ctor(struct nouveau_object *, struct nouveau_object *,
181 struct nouveau_oclass *, void *, u32,
182 struct nouveau_object **);
183extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
184int nv50_disp_curs_ctor(struct nouveau_object *, struct nouveau_object *,
185 struct nouveau_oclass *, void *, u32,
186 struct nouveau_object **);
167extern struct nouveau_ofuncs nv50_disp_base_ofuncs; 187extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
188int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
189 struct nouveau_oclass *, void *, u32,
190 struct nouveau_object **);
191void nv50_disp_base_dtor(struct nouveau_object *);
192extern struct nouveau_omthds nv50_disp_base_omthds[];
168extern struct nouveau_oclass nv50_disp_cclass; 193extern struct nouveau_oclass nv50_disp_cclass;
169void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head, 194void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
170 const struct nv50_disp_mthd_chan *); 195 const struct nv50_disp_mthd_chan *);
171void nv50_disp_intr_supervisor(struct work_struct *); 196void nv50_disp_intr_supervisor(struct work_struct *);
172void nv50_disp_intr(struct nouveau_subdev *); 197void nv50_disp_intr(struct nouveau_subdev *);
198extern const struct nvkm_event_func nv50_disp_vblank_func;
173 199
174extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan; 200extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan;
175extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac; 201extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac;
176extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head; 202extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head;
177extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan; 203extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan;
178extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan; 204extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
179extern struct nouveau_omthds nv84_disp_base_omthds[];
180 205
181extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan; 206extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan;
182 207
183extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs; 208extern struct nv50_disp_chan_impl nvd0_disp_mast_ofuncs;
184extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base; 209extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base;
185extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac; 210extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac;
186extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor; 211extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor;
187extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior; 212extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior;
188extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs; 213extern struct nv50_disp_chan_impl nvd0_disp_sync_ofuncs;
189extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs; 214extern struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs;
190extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan; 215extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan;
191extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs; 216extern struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs;
192extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs; 217extern struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs;
193extern struct nouveau_omthds nvd0_disp_base_omthds[];
194extern struct nouveau_ofuncs nvd0_disp_base_ofuncs; 218extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
195extern struct nouveau_oclass nvd0_disp_cclass; 219extern struct nouveau_oclass nvd0_disp_cclass;
196void nvd0_disp_intr_supervisor(struct work_struct *); 220void nvd0_disp_intr_supervisor(struct work_struct *);
197void nvd0_disp_intr(struct nouveau_subdev *); 221void nvd0_disp_intr(struct nouveau_subdev *);
222extern const struct nvkm_event_func nvd0_disp_vblank_func;
198 223
199extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan; 224extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
200extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan; 225extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index 1cc62e434683..788ced1b6182 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -25,7 +25,7 @@
25#include <engine/software.h> 25#include <engine/software.h>
26#include <engine/disp.h> 26#include <engine/disp.h>
27 27
28#include <core/class.h> 28#include <nvif/class.h>
29 29
30#include "nv50.h" 30#include "nv50.h"
31 31
@@ -204,31 +204,17 @@ nv84_disp_ovly_mthd_chan = {
204 204
205static struct nouveau_oclass 205static struct nouveau_oclass
206nv84_disp_sclass[] = { 206nv84_disp_sclass[] = {
207 { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, 207 { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
208 { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, 208 { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
209 { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, 209 { G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
210 { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, 210 { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
211 { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, 211 { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
212 {} 212 {}
213}; 213};
214 214
215struct nouveau_omthds
216nv84_disp_base_omthds[] = {
217 { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
218 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
219 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
220 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
221 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
222 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
223 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
224 { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
225 { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
226 {},
227};
228
229static struct nouveau_oclass 215static struct nouveau_oclass
230nv84_disp_base_oclass[] = { 216nv84_disp_base_oclass[] = {
231 { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, 217 { G82_DISP, &nv50_disp_base_ofuncs },
232 {} 218 {}
233}; 219};
234 220
@@ -276,9 +262,11 @@ nv84_disp_oclass = &(struct nv50_disp_impl) {
276 .init = _nouveau_disp_init, 262 .init = _nouveau_disp_init,
277 .fini = _nouveau_disp_fini, 263 .fini = _nouveau_disp_fini,
278 }, 264 },
265 .base.vblank = &nv50_disp_vblank_func,
279 .base.outp = nv50_disp_outp_sclass, 266 .base.outp = nv50_disp_outp_sclass,
280 .mthd.core = &nv84_disp_mast_mthd_chan, 267 .mthd.core = &nv84_disp_mast_mthd_chan,
281 .mthd.base = &nv84_disp_sync_mthd_chan, 268 .mthd.base = &nv84_disp_sync_mthd_chan,
282 .mthd.ovly = &nv84_disp_ovly_mthd_chan, 269 .mthd.ovly = &nv84_disp_ovly_mthd_chan,
283 .mthd.prev = 0x000004, 270 .mthd.prev = 0x000004,
271 .head.scanoutpos = nv50_disp_base_scanoutpos,
284}.base.base; 272}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index 4f718a9f5aef..fa79de906eae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -25,7 +25,7 @@
25#include <engine/software.h> 25#include <engine/software.h>
26#include <engine/disp.h> 26#include <engine/disp.h>
27 27
28#include <core/class.h> 28#include <nvif/class.h>
29 29
30#include "nv50.h" 30#include "nv50.h"
31 31
@@ -63,32 +63,17 @@ nv94_disp_mast_mthd_chan = {
63 63
64static struct nouveau_oclass 64static struct nouveau_oclass
65nv94_disp_sclass[] = { 65nv94_disp_sclass[] = {
66 { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, 66 { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
67 { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, 67 { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
68 { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, 68 { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
69 { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, 69 { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
70 { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, 70 { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
71 {} 71 {}
72}; 72};
73 73
74static struct nouveau_omthds
75nv94_disp_base_omthds[] = {
76 { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
77 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
78 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
79 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
80 { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
81 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
82 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
83 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
84 { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
85 { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
86 {},
87};
88
89static struct nouveau_oclass 74static struct nouveau_oclass
90nv94_disp_base_oclass[] = { 75nv94_disp_base_oclass[] = {
91 { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds }, 76 { GT206_DISP, &nv50_disp_base_ofuncs },
92 {} 77 {}
93}; 78};
94 79
@@ -143,9 +128,11 @@ nv94_disp_oclass = &(struct nv50_disp_impl) {
143 .init = _nouveau_disp_init, 128 .init = _nouveau_disp_init,
144 .fini = _nouveau_disp_fini, 129 .fini = _nouveau_disp_fini,
145 }, 130 },
131 .base.vblank = &nv50_disp_vblank_func,
146 .base.outp = nv94_disp_outp_sclass, 132 .base.outp = nv94_disp_outp_sclass,
147 .mthd.core = &nv94_disp_mast_mthd_chan, 133 .mthd.core = &nv94_disp_mast_mthd_chan,
148 .mthd.base = &nv84_disp_sync_mthd_chan, 134 .mthd.base = &nv84_disp_sync_mthd_chan,
149 .mthd.ovly = &nv84_disp_ovly_mthd_chan, 135 .mthd.ovly = &nv84_disp_ovly_mthd_chan,
150 .mthd.prev = 0x000004, 136 .mthd.prev = 0x000004,
137 .head.scanoutpos = nv50_disp_base_scanoutpos,
151}.base.base; 138}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 6237a9a36f70..7af15f5d48dc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -25,7 +25,7 @@
25#include <engine/software.h> 25#include <engine/software.h>
26#include <engine/disp.h> 26#include <engine/disp.h>
27 27
28#include <core/class.h> 28#include <nvif/class.h>
29 29
30#include "nv50.h" 30#include "nv50.h"
31 31
@@ -80,17 +80,17 @@ nva0_disp_ovly_mthd_chan = {
80 80
81static struct nouveau_oclass 81static struct nouveau_oclass
82nva0_disp_sclass[] = { 82nva0_disp_sclass[] = {
83 { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, 83 { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
84 { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, 84 { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
85 { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, 85 { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
86 { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, 86 { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
87 { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, 87 { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
88 {} 88 {}
89}; 89};
90 90
91static struct nouveau_oclass 91static struct nouveau_oclass
92nva0_disp_base_oclass[] = { 92nva0_disp_base_oclass[] = {
93 { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, 93 { GT200_DISP, &nv50_disp_base_ofuncs },
94 {} 94 {}
95}; 95};
96 96
@@ -138,9 +138,11 @@ nva0_disp_oclass = &(struct nv50_disp_impl) {
138 .init = _nouveau_disp_init, 138 .init = _nouveau_disp_init,
139 .fini = _nouveau_disp_fini, 139 .fini = _nouveau_disp_fini,
140 }, 140 },
141 .base.vblank = &nv50_disp_vblank_func,
141 .base.outp = nv50_disp_outp_sclass, 142 .base.outp = nv50_disp_outp_sclass,
142 .mthd.core = &nv84_disp_mast_mthd_chan, 143 .mthd.core = &nv84_disp_mast_mthd_chan,
143 .mthd.base = &nv84_disp_sync_mthd_chan, 144 .mthd.base = &nv84_disp_sync_mthd_chan,
144 .mthd.ovly = &nva0_disp_ovly_mthd_chan, 145 .mthd.ovly = &nva0_disp_ovly_mthd_chan,
145 .mthd.prev = 0x000004, 146 .mthd.prev = 0x000004,
147 .head.scanoutpos = nv50_disp_base_scanoutpos,
146}.base.base; 148}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 019124d4782b..6bd39448f8da 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -25,7 +25,7 @@
25#include <engine/software.h> 25#include <engine/software.h>
26#include <engine/disp.h> 26#include <engine/disp.h>
27 27
28#include <core/class.h> 28#include <nvif/class.h>
29 29
30#include "nv50.h" 30#include "nv50.h"
31 31
@@ -35,33 +35,17 @@
35 35
36static struct nouveau_oclass 36static struct nouveau_oclass
37nva3_disp_sclass[] = { 37nva3_disp_sclass[] = {
38 { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, 38 { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
39 { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, 39 { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
40 { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, 40 { GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
41 { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, 41 { GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
42 { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, 42 { GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
43 {} 43 {}
44}; 44};
45 45
46static struct nouveau_omthds
47nva3_disp_base_omthds[] = {
48 { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
49 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
50 { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
51 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
52 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
53 { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
54 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
55 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
56 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
57 { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
58 { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
59 {},
60};
61
62static struct nouveau_oclass 46static struct nouveau_oclass
63nva3_disp_base_oclass[] = { 47nva3_disp_base_oclass[] = {
64 { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds }, 48 { GT214_DISP, &nv50_disp_base_ofuncs },
65 {} 49 {}
66}; 50};
67 51
@@ -110,9 +94,11 @@ nva3_disp_oclass = &(struct nv50_disp_impl) {
110 .init = _nouveau_disp_init, 94 .init = _nouveau_disp_init,
111 .fini = _nouveau_disp_fini, 95 .fini = _nouveau_disp_fini,
112 }, 96 },
97 .base.vblank = &nv50_disp_vblank_func,
113 .base.outp = nv94_disp_outp_sclass, 98 .base.outp = nv94_disp_outp_sclass,
114 .mthd.core = &nv94_disp_mast_mthd_chan, 99 .mthd.core = &nv94_disp_mast_mthd_chan,
115 .mthd.base = &nv84_disp_sync_mthd_chan, 100 .mthd.base = &nv84_disp_sync_mthd_chan,
116 .mthd.ovly = &nv84_disp_ovly_mthd_chan, 101 .mthd.ovly = &nv84_disp_ovly_mthd_chan,
117 .mthd.prev = 0x000004, 102 .mthd.prev = 0x000004,
103 .head.scanoutpos = nv50_disp_base_scanoutpos,
118}.base.base; 104}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index fa30d8196f35..a4bb3c774ee1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -23,9 +23,11 @@
23 */ 23 */
24 24
25#include <core/object.h> 25#include <core/object.h>
26#include <core/client.h>
26#include <core/parent.h> 27#include <core/parent.h>
27#include <core/handle.h> 28#include <core/handle.h>
28#include <core/class.h> 29#include <nvif/unpack.h>
30#include <nvif/class.h>
29 31
30#include <engine/disp.h> 32#include <engine/disp.h>
31 33
@@ -265,30 +267,6 @@ nvd0_disp_mast_mthd_chan = {
265}; 267};
266 268
267static int 269static int
268nvd0_disp_mast_ctor(struct nouveau_object *parent,
269 struct nouveau_object *engine,
270 struct nouveau_oclass *oclass, void *data, u32 size,
271 struct nouveau_object **pobject)
272{
273 struct nv50_display_mast_class *args = data;
274 struct nv50_disp_dmac *mast;
275 int ret;
276
277 if (size < sizeof(*args))
278 return -EINVAL;
279
280 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
281 0, sizeof(*mast), (void **)&mast);
282 *pobject = nv_object(mast);
283 if (ret)
284 return ret;
285
286 nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
287 nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
288 return 0;
289}
290
291static int
292nvd0_disp_mast_init(struct nouveau_object *object) 270nvd0_disp_mast_init(struct nouveau_object *object)
293{ 271{
294 struct nv50_disp_priv *priv = (void *)object->engine; 272 struct nv50_disp_priv *priv = (void *)object->engine;
@@ -342,14 +320,18 @@ nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
342 return nv50_disp_chan_fini(&mast->base, suspend); 320 return nv50_disp_chan_fini(&mast->base, suspend);
343} 321}
344 322
345struct nouveau_ofuncs 323struct nv50_disp_chan_impl
346nvd0_disp_mast_ofuncs = { 324nvd0_disp_mast_ofuncs = {
347 .ctor = nvd0_disp_mast_ctor, 325 .base.ctor = nv50_disp_mast_ctor,
348 .dtor = nv50_disp_dmac_dtor, 326 .base.dtor = nv50_disp_dmac_dtor,
349 .init = nvd0_disp_mast_init, 327 .base.init = nvd0_disp_mast_init,
350 .fini = nvd0_disp_mast_fini, 328 .base.fini = nvd0_disp_mast_fini,
351 .rd32 = nv50_disp_chan_rd32, 329 .base.map = nv50_disp_chan_map,
352 .wr32 = nv50_disp_chan_wr32, 330 .base.rd32 = nv50_disp_chan_rd32,
331 .base.wr32 = nv50_disp_chan_wr32,
332 .chid = 0,
333 .attach = nvd0_disp_dmac_object_attach,
334 .detach = nvd0_disp_dmac_object_detach,
353}; 335};
354 336
355/******************************************************************************* 337/*******************************************************************************
@@ -431,40 +413,18 @@ nvd0_disp_sync_mthd_chan = {
431 } 413 }
432}; 414};
433 415
434static int 416struct nv50_disp_chan_impl
435nvd0_disp_sync_ctor(struct nouveau_object *parent,
436 struct nouveau_object *engine,
437 struct nouveau_oclass *oclass, void *data, u32 size,
438 struct nouveau_object **pobject)
439{
440 struct nv50_display_sync_class *args = data;
441 struct nv50_disp_priv *priv = (void *)engine;
442 struct nv50_disp_dmac *dmac;
443 int ret;
444
445 if (size < sizeof(*args) || args->head >= priv->head.nr)
446 return -EINVAL;
447
448 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
449 1 + args->head, sizeof(*dmac),
450 (void **)&dmac);
451 *pobject = nv_object(dmac);
452 if (ret)
453 return ret;
454
455 nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
456 nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
457 return 0;
458}
459
460struct nouveau_ofuncs
461nvd0_disp_sync_ofuncs = { 417nvd0_disp_sync_ofuncs = {
462 .ctor = nvd0_disp_sync_ctor, 418 .base.ctor = nv50_disp_sync_ctor,
463 .dtor = nv50_disp_dmac_dtor, 419 .base.dtor = nv50_disp_dmac_dtor,
464 .init = nvd0_disp_dmac_init, 420 .base.init = nvd0_disp_dmac_init,
465 .fini = nvd0_disp_dmac_fini, 421 .base.fini = nvd0_disp_dmac_fini,
466 .rd32 = nv50_disp_chan_rd32, 422 .base.map = nv50_disp_chan_map,
467 .wr32 = nv50_disp_chan_wr32, 423 .base.rd32 = nv50_disp_chan_rd32,
424 .base.wr32 = nv50_disp_chan_wr32,
425 .chid = 1,
426 .attach = nvd0_disp_dmac_object_attach,
427 .detach = nvd0_disp_dmac_object_detach,
468}; 428};
469 429
470/******************************************************************************* 430/*******************************************************************************
@@ -533,40 +493,18 @@ nvd0_disp_ovly_mthd_chan = {
533 } 493 }
534}; 494};
535 495
536static int 496struct nv50_disp_chan_impl
537nvd0_disp_ovly_ctor(struct nouveau_object *parent,
538 struct nouveau_object *engine,
539 struct nouveau_oclass *oclass, void *data, u32 size,
540 struct nouveau_object **pobject)
541{
542 struct nv50_display_ovly_class *args = data;
543 struct nv50_disp_priv *priv = (void *)engine;
544 struct nv50_disp_dmac *dmac;
545 int ret;
546
547 if (size < sizeof(*args) || args->head >= priv->head.nr)
548 return -EINVAL;
549
550 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
551 5 + args->head, sizeof(*dmac),
552 (void **)&dmac);
553 *pobject = nv_object(dmac);
554 if (ret)
555 return ret;
556
557 nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
558 nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
559 return 0;
560}
561
562struct nouveau_ofuncs
563nvd0_disp_ovly_ofuncs = { 497nvd0_disp_ovly_ofuncs = {
564 .ctor = nvd0_disp_ovly_ctor, 498 .base.ctor = nv50_disp_ovly_ctor,
565 .dtor = nv50_disp_dmac_dtor, 499 .base.dtor = nv50_disp_dmac_dtor,
566 .init = nvd0_disp_dmac_init, 500 .base.init = nvd0_disp_dmac_init,
567 .fini = nvd0_disp_dmac_fini, 501 .base.fini = nvd0_disp_dmac_fini,
568 .rd32 = nv50_disp_chan_rd32, 502 .base.map = nv50_disp_chan_map,
569 .wr32 = nv50_disp_chan_wr32, 503 .base.rd32 = nv50_disp_chan_rd32,
504 .base.wr32 = nv50_disp_chan_wr32,
505 .chid = 5,
506 .attach = nvd0_disp_dmac_object_attach,
507 .detach = nvd0_disp_dmac_object_detach,
570}; 508};
571 509
572/******************************************************************************* 510/*******************************************************************************
@@ -574,23 +512,6 @@ nvd0_disp_ovly_ofuncs = {
574 ******************************************************************************/ 512 ******************************************************************************/
575 513
576static int 514static int
577nvd0_disp_pioc_create_(struct nouveau_object *parent,
578 struct nouveau_object *engine,
579 struct nouveau_oclass *oclass, int chid,
580 int length, void **pobject)
581{
582 return nv50_disp_chan_create_(parent, engine, oclass, chid,
583 length, pobject);
584}
585
586static void
587nvd0_disp_pioc_dtor(struct nouveau_object *object)
588{
589 struct nv50_disp_pioc *pioc = (void *)object;
590 nv50_disp_chan_destroy(&pioc->base);
591}
592
593static int
594nvd0_disp_pioc_init(struct nouveau_object *object) 515nvd0_disp_pioc_init(struct nouveau_object *object)
595{ 516{
596 struct nv50_disp_priv *priv = (void *)object->engine; 517 struct nv50_disp_priv *priv = (void *)object->engine;
@@ -643,152 +564,68 @@ nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
643 * EVO immediate overlay channel objects 564 * EVO immediate overlay channel objects
644 ******************************************************************************/ 565 ******************************************************************************/
645 566
646static int 567struct nv50_disp_chan_impl
647nvd0_disp_oimm_ctor(struct nouveau_object *parent,
648 struct nouveau_object *engine,
649 struct nouveau_oclass *oclass, void *data, u32 size,
650 struct nouveau_object **pobject)
651{
652 struct nv50_display_oimm_class *args = data;
653 struct nv50_disp_priv *priv = (void *)engine;
654 struct nv50_disp_pioc *pioc;
655 int ret;
656
657 if (size < sizeof(*args) || args->head >= priv->head.nr)
658 return -EINVAL;
659
660 ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
661 sizeof(*pioc), (void **)&pioc);
662 *pobject = nv_object(pioc);
663 if (ret)
664 return ret;
665
666 return 0;
667}
668
669struct nouveau_ofuncs
670nvd0_disp_oimm_ofuncs = { 568nvd0_disp_oimm_ofuncs = {
671 .ctor = nvd0_disp_oimm_ctor, 569 .base.ctor = nv50_disp_oimm_ctor,
672 .dtor = nvd0_disp_pioc_dtor, 570 .base.dtor = nv50_disp_pioc_dtor,
673 .init = nvd0_disp_pioc_init, 571 .base.init = nvd0_disp_pioc_init,
674 .fini = nvd0_disp_pioc_fini, 572 .base.fini = nvd0_disp_pioc_fini,
675 .rd32 = nv50_disp_chan_rd32, 573 .base.map = nv50_disp_chan_map,
676 .wr32 = nv50_disp_chan_wr32, 574 .base.rd32 = nv50_disp_chan_rd32,
575 .base.wr32 = nv50_disp_chan_wr32,
576 .chid = 9,
677}; 577};
678 578
679/******************************************************************************* 579/*******************************************************************************
680 * EVO cursor channel objects 580 * EVO cursor channel objects
681 ******************************************************************************/ 581 ******************************************************************************/
682 582
683static int 583struct nv50_disp_chan_impl
684nvd0_disp_curs_ctor(struct nouveau_object *parent,
685 struct nouveau_object *engine,
686 struct nouveau_oclass *oclass, void *data, u32 size,
687 struct nouveau_object **pobject)
688{
689 struct nv50_display_curs_class *args = data;
690 struct nv50_disp_priv *priv = (void *)engine;
691 struct nv50_disp_pioc *pioc;
692 int ret;
693
694 if (size < sizeof(*args) || args->head >= priv->head.nr)
695 return -EINVAL;
696
697 ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
698 sizeof(*pioc), (void **)&pioc);
699 *pobject = nv_object(pioc);
700 if (ret)
701 return ret;
702
703 return 0;
704}
705
706struct nouveau_ofuncs
707nvd0_disp_curs_ofuncs = { 584nvd0_disp_curs_ofuncs = {
708 .ctor = nvd0_disp_curs_ctor, 585 .base.ctor = nv50_disp_curs_ctor,
709 .dtor = nvd0_disp_pioc_dtor, 586 .base.dtor = nv50_disp_pioc_dtor,
710 .init = nvd0_disp_pioc_init, 587 .base.init = nvd0_disp_pioc_init,
711 .fini = nvd0_disp_pioc_fini, 588 .base.fini = nvd0_disp_pioc_fini,
712 .rd32 = nv50_disp_chan_rd32, 589 .base.map = nv50_disp_chan_map,
713 .wr32 = nv50_disp_chan_wr32, 590 .base.rd32 = nv50_disp_chan_rd32,
591 .base.wr32 = nv50_disp_chan_wr32,
592 .chid = 13,
714}; 593};
715 594
716/******************************************************************************* 595/*******************************************************************************
717 * Base display object 596 * Base display object
718 ******************************************************************************/ 597 ******************************************************************************/
719 598
720static int 599int
721nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd, 600nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
722 void *data, u32 size)
723{
724 struct nv50_disp_priv *priv = (void *)object->engine;
725 struct nv04_display_scanoutpos *args = data;
726 const int head = (mthd & NV50_DISP_MTHD_HEAD);
727 u32 blanke, blanks, total;
728
729 if (size < sizeof(*args) || head >= priv->head.nr)
730 return -EINVAL;
731
732 total = nv_rd32(priv, 0x640414 + (head * 0x300));
733 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
734 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
735
736 args->vblanke = (blanke & 0xffff0000) >> 16;
737 args->hblanke = (blanke & 0x0000ffff);
738 args->vblanks = (blanks & 0xffff0000) >> 16;
739 args->hblanks = (blanks & 0x0000ffff);
740 args->vtotal = ( total & 0xffff0000) >> 16;
741 args->htotal = ( total & 0x0000ffff);
742
743 args->time[0] = ktime_to_ns(ktime_get());
744 args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
745 args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
746 args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
747 return 0;
748}
749
750static void
751nvd0_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
752{
753 nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
754}
755
756static void
757nvd0_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
758{ 601{
759 nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); 602 const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
760} 603 const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
761 604 const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
762static int 605 union {
763nvd0_disp_base_ctor(struct nouveau_object *parent, 606 struct nv04_disp_scanoutpos_v0 v0;
764 struct nouveau_object *engine, 607 } *args = data;
765 struct nouveau_oclass *oclass, void *data, u32 size,
766 struct nouveau_object **pobject)
767{
768 struct nv50_disp_priv *priv = (void *)engine;
769 struct nv50_disp_base *base;
770 int ret; 608 int ret;
771 609
772 ret = nouveau_parent_create(parent, engine, oclass, 0, 610 nv_ioctl(object, "disp scanoutpos size %d\n", size);
773 priv->sclass, 0, &base); 611 if (nvif_unpack(args->v0, 0, 0, false)) {
774 *pobject = nv_object(base); 612 nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
775 if (ret) 613 args->v0.vblanke = (blanke & 0xffff0000) >> 16;
614 args->v0.hblanke = (blanke & 0x0000ffff);
615 args->v0.vblanks = (blanks & 0xffff0000) >> 16;
616 args->v0.hblanks = (blanks & 0x0000ffff);
617 args->v0.vtotal = ( total & 0xffff0000) >> 16;
618 args->v0.htotal = ( total & 0x0000ffff);
619 args->v0.time[0] = ktime_to_ns(ktime_get());
620 args->v0.vline = /* vline read locks hline */
621 nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
622 args->v0.time[1] = ktime_to_ns(ktime_get());
623 args->v0.hline =
624 nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
625 } else
776 return ret; 626 return ret;
777 627
778 priv->base.vblank->priv = priv; 628 return 0;
779 priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
780 priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
781
782 return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
783 &base->ramht);
784}
785
786static void
787nvd0_disp_base_dtor(struct nouveau_object *object)
788{
789 struct nv50_disp_base *base = (void *)object;
790 nouveau_ramht_ref(NULL, &base->ramht);
791 nouveau_parent_destroy(&base->base);
792} 629}
793 630
794static int 631static int
@@ -874,41 +711,27 @@ nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
874 711
875struct nouveau_ofuncs 712struct nouveau_ofuncs
876nvd0_disp_base_ofuncs = { 713nvd0_disp_base_ofuncs = {
877 .ctor = nvd0_disp_base_ctor, 714 .ctor = nv50_disp_base_ctor,
878 .dtor = nvd0_disp_base_dtor, 715 .dtor = nv50_disp_base_dtor,
879 .init = nvd0_disp_base_init, 716 .init = nvd0_disp_base_init,
880 .fini = nvd0_disp_base_fini, 717 .fini = nvd0_disp_base_fini,
881}; 718 .mthd = nv50_disp_base_mthd,
882 719 .ntfy = nouveau_disp_ntfy,
883struct nouveau_omthds
884nvd0_disp_base_omthds[] = {
885 { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nvd0_disp_base_scanoutpos },
886 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
887 { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
888 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
889 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
890 { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
891 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
892 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
893 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
894 { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
895 { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
896 {},
897}; 720};
898 721
899static struct nouveau_oclass 722static struct nouveau_oclass
900nvd0_disp_base_oclass[] = { 723nvd0_disp_base_oclass[] = {
901 { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, 724 { GF110_DISP, &nvd0_disp_base_ofuncs },
902 {} 725 {}
903}; 726};
904 727
905static struct nouveau_oclass 728static struct nouveau_oclass
906nvd0_disp_sclass[] = { 729nvd0_disp_sclass[] = {
907 { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, 730 { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
908 { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, 731 { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
909 { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, 732 { GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
910 { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, 733 { GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
911 { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, 734 { GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
912 {} 735 {}
913}; 736};
914 737
@@ -916,6 +739,27 @@ nvd0_disp_sclass[] = {
916 * Display engine implementation 739 * Display engine implementation
917 ******************************************************************************/ 740 ******************************************************************************/
918 741
742static void
743nvd0_disp_vblank_init(struct nvkm_event *event, int type, int head)
744{
745 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
746 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
747}
748
749static void
750nvd0_disp_vblank_fini(struct nvkm_event *event, int type, int head)
751{
752 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
753 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
754}
755
756const struct nvkm_event_func
757nvd0_disp_vblank_func = {
758 .ctor = nouveau_disp_vblank_ctor,
759 .init = nvd0_disp_vblank_init,
760 .fini = nvd0_disp_vblank_fini,
761};
762
919static struct nvkm_output * 763static struct nvkm_output *
920exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl, 764exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
921 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, 765 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
@@ -1343,7 +1187,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
1343 if (mask & intr) { 1187 if (mask & intr) {
1344 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800)); 1188 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
1345 if (stat & 0x00000001) 1189 if (stat & 0x00000001)
1346 nouveau_event_trigger(priv->base.vblank, 1, i); 1190 nouveau_disp_vblank(&priv->base, i);
1347 nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0); 1191 nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
1348 nv_rd32(priv, 0x6100c0 + (i * 0x800)); 1192 nv_rd32(priv, 0x6100c0 + (i * 0x800));
1349 } 1193 }
@@ -1396,9 +1240,11 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) {
1396 .init = _nouveau_disp_init, 1240 .init = _nouveau_disp_init,
1397 .fini = _nouveau_disp_fini, 1241 .fini = _nouveau_disp_fini,
1398 }, 1242 },
1243 .base.vblank = &nvd0_disp_vblank_func,
1399 .base.outp = nvd0_disp_outp_sclass, 1244 .base.outp = nvd0_disp_outp_sclass,
1400 .mthd.core = &nvd0_disp_mast_mthd_chan, 1245 .mthd.core = &nvd0_disp_mast_mthd_chan,
1401 .mthd.base = &nvd0_disp_sync_mthd_chan, 1246 .mthd.base = &nvd0_disp_sync_mthd_chan,
1402 .mthd.ovly = &nvd0_disp_ovly_mthd_chan, 1247 .mthd.ovly = &nvd0_disp_ovly_mthd_chan,
1403 .mthd.prev = -0x020000, 1248 .mthd.prev = -0x020000,
1249 .head.scanoutpos = nvd0_disp_base_scanoutpos,
1404}.base.base; 1250}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index 11328e3f5df1..47fef1e398c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -25,7 +25,7 @@
25#include <engine/software.h> 25#include <engine/software.h>
26#include <engine/disp.h> 26#include <engine/disp.h>
27 27
28#include <core/class.h> 28#include <nvif/class.h>
29 29
30#include "nv50.h" 30#include "nv50.h"
31 31
@@ -200,17 +200,17 @@ nve0_disp_ovly_mthd_chan = {
200 200
201static struct nouveau_oclass 201static struct nouveau_oclass
202nve0_disp_sclass[] = { 202nve0_disp_sclass[] = {
203 { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, 203 { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
204 { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, 204 { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
205 { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, 205 { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
206 { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, 206 { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
207 { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, 207 { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
208 {} 208 {}
209}; 209};
210 210
211static struct nouveau_oclass 211static struct nouveau_oclass
212nve0_disp_base_oclass[] = { 212nve0_disp_base_oclass[] = {
213 { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, 213 { GK104_DISP, &nvd0_disp_base_ofuncs },
214 {} 214 {}
215}; 215};
216 216
@@ -258,9 +258,11 @@ nve0_disp_oclass = &(struct nv50_disp_impl) {
258 .init = _nouveau_disp_init, 258 .init = _nouveau_disp_init,
259 .fini = _nouveau_disp_fini, 259 .fini = _nouveau_disp_fini,
260 }, 260 },
261 .base.vblank = &nvd0_disp_vblank_func,
261 .base.outp = nvd0_disp_outp_sclass, 262 .base.outp = nvd0_disp_outp_sclass,
262 .mthd.core = &nve0_disp_mast_mthd_chan, 263 .mthd.core = &nve0_disp_mast_mthd_chan,
263 .mthd.base = &nvd0_disp_sync_mthd_chan, 264 .mthd.base = &nvd0_disp_sync_mthd_chan,
264 .mthd.ovly = &nve0_disp_ovly_mthd_chan, 265 .mthd.ovly = &nve0_disp_ovly_mthd_chan,
265 .mthd.prev = -0x020000, 266 .mthd.prev = -0x020000,
267 .head.scanoutpos = nvd0_disp_base_scanoutpos,
266}.base.base; 268}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 104388081d73..04bda4ac4ed3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -25,7 +25,7 @@
25#include <engine/software.h> 25#include <engine/software.h>
26#include <engine/disp.h> 26#include <engine/disp.h>
27 27
28#include <core/class.h> 28#include <nvif/class.h>
29 29
30#include "nv50.h" 30#include "nv50.h"
31 31
@@ -35,17 +35,17 @@
35 35
36static struct nouveau_oclass 36static struct nouveau_oclass
37nvf0_disp_sclass[] = { 37nvf0_disp_sclass[] = {
38 { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, 38 { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
39 { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, 39 { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
40 { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, 40 { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
41 { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, 41 { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
42 { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, 42 { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
43 {} 43 {}
44}; 44};
45 45
46static struct nouveau_oclass 46static struct nouveau_oclass
47nvf0_disp_base_oclass[] = { 47nvf0_disp_base_oclass[] = {
48 { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, 48 { GK110_DISP, &nvd0_disp_base_ofuncs },
49 {} 49 {}
50}; 50};
51 51
@@ -93,9 +93,11 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) {
93 .init = _nouveau_disp_init, 93 .init = _nouveau_disp_init,
94 .fini = _nouveau_disp_fini, 94 .fini = _nouveau_disp_fini,
95 }, 95 },
96 .base.vblank = &nvd0_disp_vblank_func,
96 .base.outp = nvd0_disp_outp_sclass, 97 .base.outp = nvd0_disp_outp_sclass,
97 .mthd.core = &nve0_disp_mast_mthd_chan, 98 .mthd.core = &nve0_disp_mast_mthd_chan,
98 .mthd.base = &nvd0_disp_sync_mthd_chan, 99 .mthd.base = &nvd0_disp_sync_mthd_chan,
99 .mthd.ovly = &nve0_disp_ovly_mthd_chan, 100 .mthd.ovly = &nve0_disp_ovly_mthd_chan,
100 .mthd.prev = -0x020000, 101 .mthd.prev = -0x020000,
102 .head.scanoutpos = nvd0_disp_base_scanoutpos,
101}.base.base; 103}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
index ad9ba7ccec7f..a5ff00a9cedc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
@@ -78,6 +78,7 @@ nvkm_output_create_(struct nouveau_object *parent,
78 78
79 outp->info = *dcbE; 79 outp->info = *dcbE;
80 outp->index = index; 80 outp->index = index;
81 outp->or = ffs(outp->info.or) - 1;
81 82
82 DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n", 83 DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n",
83 dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ? 84 dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ?
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
index bc76fbf85710..187f435ad0e2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
@@ -9,6 +9,7 @@ struct nvkm_output {
9 9
10 struct dcb_output info; 10 struct dcb_output info;
11 int index; 11 int index;
12 int or;
12 13
13 struct nouveau_i2c_port *port; 14 struct nouveau_i2c_port *port;
14 struct nouveau_i2c_port *edid; 15 struct nouveau_i2c_port *edid;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
index eb2d7789555d..6f6e2a898270 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -22,6 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h>
26#include <nvif/event.h>
27
25#include <subdev/i2c.h> 28#include <subdev/i2c.h>
26 29
27#include "outpdp.h" 30#include "outpdp.h"
@@ -86,7 +89,7 @@ done:
86 atomic_set(&outp->lt.done, 0); 89 atomic_set(&outp->lt.done, 0);
87 schedule_work(&outp->lt.work); 90 schedule_work(&outp->lt.work);
88 } else { 91 } else {
89 nouveau_event_get(outp->irq); 92 nvkm_notify_get(&outp->irq);
90 } 93 }
91 94
92 if (wait) { 95 if (wait) {
@@ -133,46 +136,59 @@ nvkm_output_dp_detect(struct nvkm_output_dp *outp)
133 } 136 }
134} 137}
135 138
136static void 139static int
137nvkm_output_dp_service_work(struct work_struct *work) 140nvkm_output_dp_hpd(struct nvkm_notify *notify)
138{ 141{
139 struct nvkm_output_dp *outp = container_of(work, typeof(*outp), work); 142 struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
140 struct nouveau_disp *disp = nouveau_disp(outp); 143 struct nvkm_output_dp *outp;
141 int type = atomic_xchg(&outp->pending, 0); 144 struct nouveau_disp *disp = nouveau_disp(conn);
142 u32 send = 0; 145 const struct nvkm_i2c_ntfy_rep *line = notify->data;
143 146 struct nvif_notify_conn_rep_v0 rep = {};
144 if (type & (NVKM_I2C_PLUG | NVKM_I2C_UNPLUG)) { 147
145 nvkm_output_dp_detect(outp); 148 list_for_each_entry(outp, &disp->outp, base.head) {
146 if (type & NVKM_I2C_UNPLUG) 149 if (outp->base.conn == conn &&
147 send |= NVKM_HPD_UNPLUG; 150 outp->info.type == DCB_OUTPUT_DP) {
148 if (type & NVKM_I2C_PLUG) 151 DBG("HPD: %d\n", line->mask);
149 send |= NVKM_HPD_PLUG; 152 nvkm_output_dp_detect(outp);
150 nouveau_event_get(outp->base.conn->hpd.event); 153
151 } 154 if (line->mask & NVKM_I2C_UNPLUG)
152 155 rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
153 if (type & NVKM_I2C_IRQ) { 156 if (line->mask & NVKM_I2C_PLUG)
154 nvkm_output_dp_train(&outp->base, 0, true); 157 rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
155 send |= NVKM_HPD_IRQ; 158
159 nvkm_event_send(&disp->hpd, rep.mask, conn->index,
160 &rep, sizeof(rep));
161 return NVKM_NOTIFY_KEEP;
162 }
156 } 163 }
157 164
158 nouveau_event_trigger(disp->hpd, send, outp->base.info.connector); 165 WARN_ON(1);
166 return NVKM_NOTIFY_DROP;
159} 167}
160 168
161static int 169static int
162nvkm_output_dp_service(void *data, u32 type, int index) 170nvkm_output_dp_irq(struct nvkm_notify *notify)
163{ 171{
164 struct nvkm_output_dp *outp = data; 172 struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
165 DBG("HPD: %d\n", type); 173 struct nouveau_disp *disp = nouveau_disp(outp);
166 atomic_or(type, &outp->pending); 174 const struct nvkm_i2c_ntfy_rep *line = notify->data;
167 schedule_work(&outp->work); 175 struct nvif_notify_conn_rep_v0 rep = {
168 return NVKM_EVENT_DROP; 176 .mask = NVIF_NOTIFY_CONN_V0_IRQ,
177 };
178 int index = outp->base.info.connector;
179
180 DBG("IRQ: %d\n", line->mask);
181 nvkm_output_dp_train(&outp->base, 0, true);
182
183 nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));
184 return NVKM_NOTIFY_DROP;
169} 185}
170 186
171int 187int
172_nvkm_output_dp_fini(struct nouveau_object *object, bool suspend) 188_nvkm_output_dp_fini(struct nouveau_object *object, bool suspend)
173{ 189{
174 struct nvkm_output_dp *outp = (void *)object; 190 struct nvkm_output_dp *outp = (void *)object;
175 nouveau_event_put(outp->irq); 191 nvkm_notify_put(&outp->irq);
176 nvkm_output_dp_enable(outp, false); 192 nvkm_output_dp_enable(outp, false);
177 return nvkm_output_fini(&outp->base, suspend); 193 return nvkm_output_fini(&outp->base, suspend);
178} 194}
@@ -189,7 +205,7 @@ void
189_nvkm_output_dp_dtor(struct nouveau_object *object) 205_nvkm_output_dp_dtor(struct nouveau_object *object)
190{ 206{
191 struct nvkm_output_dp *outp = (void *)object; 207 struct nvkm_output_dp *outp = (void *)object;
192 nouveau_event_ref(NULL, &outp->irq); 208 nvkm_notify_fini(&outp->irq);
193 nvkm_output_destroy(&outp->base); 209 nvkm_output_destroy(&outp->base);
194} 210}
195 211
@@ -213,7 +229,7 @@ nvkm_output_dp_create_(struct nouveau_object *parent,
213 if (ret) 229 if (ret)
214 return ret; 230 return ret;
215 231
216 nouveau_event_ref(NULL, &outp->base.conn->hpd.event); 232 nvkm_notify_fini(&outp->base.conn->hpd);
217 233
218 /* access to the aux channel is not optional... */ 234 /* access to the aux channel is not optional... */
219 if (!outp->base.edid) { 235 if (!outp->base.edid) {
@@ -238,20 +254,28 @@ nvkm_output_dp_create_(struct nouveau_object *parent,
238 atomic_set(&outp->lt.done, 0); 254 atomic_set(&outp->lt.done, 0);
239 255
240 /* link maintenance */ 256 /* link maintenance */
241 ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_IRQ, outp->base.edid->index, 257 ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_irq, true,
242 nvkm_output_dp_service, outp, &outp->irq); 258 &(struct nvkm_i2c_ntfy_req) {
259 .mask = NVKM_I2C_IRQ,
260 .port = outp->base.edid->index,
261 },
262 sizeof(struct nvkm_i2c_ntfy_req),
263 sizeof(struct nvkm_i2c_ntfy_rep),
264 &outp->irq);
243 if (ret) { 265 if (ret) {
244 ERR("error monitoring aux irq event: %d\n", ret); 266 ERR("error monitoring aux irq event: %d\n", ret);
245 return ret; 267 return ret;
246 } 268 }
247 269
248 INIT_WORK(&outp->work, nvkm_output_dp_service_work);
249
250 /* hotplug detect, replaces gpio-based mechanism with aux events */ 270 /* hotplug detect, replaces gpio-based mechanism with aux events */
251 ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_PLUG | NVKM_I2C_UNPLUG, 271 ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_hpd, true,
252 outp->base.edid->index, 272 &(struct nvkm_i2c_ntfy_req) {
253 nvkm_output_dp_service, outp, 273 .mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
254 &outp->base.conn->hpd.event); 274 .port = outp->base.edid->index,
275 },
276 sizeof(struct nvkm_i2c_ntfy_req),
277 sizeof(struct nvkm_i2c_ntfy_rep),
278 &outp->base.conn->hpd);
255 if (ret) { 279 if (ret) {
256 ERR("error monitoring aux hpd events: %d\n", ret); 280 ERR("error monitoring aux hpd events: %d\n", ret);
257 return ret; 281 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
index ff33ba12cb67..1fac367cc867 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
@@ -12,10 +12,7 @@ struct nvkm_output_dp {
12 struct nvbios_dpout info; 12 struct nvbios_dpout info;
13 u8 version; 13 u8 version;
14 14
15 struct nouveau_eventh *irq; 15 struct nvkm_notify irq;
16 struct nouveau_eventh *hpd;
17 struct work_struct work;
18 atomic_t pending;
19 bool present; 16 bool present;
20 u8 dpcd[16]; 17 u8 dpcd[16];
21 18
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
index fe0f256f11bf..d00f89a468a7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
@@ -22,8 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27 28
28#include <subdev/bios.h> 29#include <subdev/bios.h>
29#include <subdev/bios/dcb.h> 30#include <subdev/bios/dcb.h>
@@ -143,38 +144,29 @@ nv50_pior_dp_impl = {
143 *****************************************************************************/ 144 *****************************************************************************/
144 145
145int 146int
146nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data) 147nv50_pior_power(NV50_DISP_MTHD_V1)
147{ 148{
148 const u32 stat = data & NV50_DISP_PIOR_PWR_STATE; 149 const u32 soff = outp->or * 0x800;
149 const u32 soff = (or * 0x800); 150 union {
151 struct nv50_disp_pior_pwr_v0 v0;
152 } *args = data;
153 u32 ctrl, type;
154 int ret;
155
156 nv_ioctl(object, "disp pior pwr size %d\n", size);
157 if (nvif_unpack(args->v0, 0, 0, false)) {
158 nv_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
159 args->v0.version, args->v0.state, args->v0.type);
160 if (args->v0.type > 0x0f)
161 return -EINVAL;
162 ctrl = !!args->v0.state;
163 type = args->v0.type;
164 } else
165 return ret;
166
150 nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); 167 nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
151 nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat); 168 nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
152 nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); 169 nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
170 priv->pior.type[outp->or] = type;
153 return 0; 171 return 0;
154} 172}
155
156int
157nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
158{
159 struct nv50_disp_priv *priv = (void *)object->engine;
160 const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12;
161 const u8 or = (mthd & NV50_DISP_PIOR_MTHD_OR);
162 u32 *data = args;
163 int ret;
164
165 if (size < sizeof(u32))
166 return -EINVAL;
167
168 mthd &= ~NV50_DISP_PIOR_MTHD_TYPE;
169 mthd &= ~NV50_DISP_PIOR_MTHD_OR;
170 switch (mthd) {
171 case NV50_DISP_PIOR_PWR:
172 ret = priv->pior.power(priv, or, data[0]);
173 priv->pior.type[or] = type;
174 break;
175 default:
176 return -EINVAL;
177 }
178
179 return ret;
180}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
index 26e9a42569c7..dbd43ae9df81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
@@ -11,6 +11,7 @@ struct nouveau_disp_impl {
11 struct nouveau_oclass base; 11 struct nouveau_oclass base;
12 struct nouveau_oclass **outp; 12 struct nouveau_oclass **outp;
13 struct nouveau_oclass **conn; 13 struct nouveau_oclass **conn;
14 const struct nvkm_event_func *vblank;
14}; 15};
15 16
16#define nouveau_disp_create(p,e,c,h,i,x,d) \ 17#define nouveau_disp_create(p,e,c,h,i,x,d) \
@@ -39,4 +40,8 @@ int _nouveau_disp_fini(struct nouveau_object *, bool);
39extern struct nouveau_oclass *nvkm_output_oclass; 40extern struct nouveau_oclass *nvkm_output_oclass;
40extern struct nouveau_oclass *nvkm_connector_oclass; 41extern struct nouveau_oclass *nvkm_connector_oclass;
41 42
43int nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *);
44void nouveau_disp_vblank(struct nouveau_disp *, int head);
45int nouveau_disp_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
46
42#endif 47#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index 7a1ebdfa9e1b..ddf1760c4400 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -22,8 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27 28
28#include <subdev/bios.h> 29#include <subdev/bios.h>
29#include <subdev/bios/dcb.h> 30#include <subdev/bios/dcb.h>
@@ -32,77 +33,26 @@
32#include "nv50.h" 33#include "nv50.h"
33 34
34int 35int
35nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data) 36nv50_sor_power(NV50_DISP_MTHD_V1)
36{ 37{
37 const u32 stat = data & NV50_DISP_SOR_PWR_STATE; 38 union {
38 const u32 soff = (or * 0x800); 39 struct nv50_disp_sor_pwr_v0 v0;
40 } *args = data;
41 const u32 soff = outp->or * 0x800;
42 u32 stat;
43 int ret;
44
45 nv_ioctl(object, "disp sor pwr size %d\n", size);
46 if (nvif_unpack(args->v0, 0, 0, false)) {
47 nv_ioctl(object, "disp sor pwr vers %d state %d\n",
48 args->v0.version, args->v0.state);
49 stat = !!args->v0.state;
50 } else
51 return ret;
52
39 nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); 53 nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
40 nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat); 54 nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
41 nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); 55 nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
42 nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000); 56 nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
43 return 0; 57 return 0;
44} 58}
45
46int
47nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
48{
49 struct nv50_disp_priv *priv = (void *)object->engine;
50 const u8 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
51 const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
52 const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
53 const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
54 const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
55 struct nvkm_output *outp = NULL, *temp;
56 u32 data;
57 int ret = -EINVAL;
58
59 if (size < sizeof(u32))
60 return -EINVAL;
61 data = *(u32 *)args;
62
63 list_for_each_entry(temp, &priv->base.outp, head) {
64 if ((temp->info.hasht & 0xff) == type &&
65 (temp->info.hashm & mask) == mask) {
66 outp = temp;
67 break;
68 }
69 }
70
71 switch (mthd & ~0x3f) {
72 case NV50_DISP_SOR_PWR:
73 ret = priv->sor.power(priv, or, data);
74 break;
75 case NVA3_DISP_SOR_HDA_ELD:
76 ret = priv->sor.hda_eld(priv, or, args, size);
77 break;
78 case NV84_DISP_SOR_HDMI_PWR:
79 ret = priv->sor.hdmi(priv, head, or, data);
80 break;
81 case NV50_DISP_SOR_LVDS_SCRIPT:
82 priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
83 ret = 0;
84 break;
85 case NV94_DISP_SOR_DP_PWR:
86 if (outp) {
87 struct nvkm_output_dp *outpdp = (void *)outp;
88 switch (data) {
89 case NV94_DISP_SOR_DP_PWR_STATE_OFF:
90 nouveau_event_put(outpdp->irq);
91 ((struct nvkm_output_dp_impl *)nv_oclass(outp))
92 ->lnk_pwr(outpdp, 0);
93 atomic_set(&outpdp->lt.done, 0);
94 break;
95 case NV94_DISP_SOR_DP_PWR_STATE_ON:
96 nvkm_output_dp_train(&outpdp->base, 0, true);
97 break;
98 default:
99 return -EINVAL;
100 }
101 }
102 break;
103 default:
104 BUG_ON(1);
105 }
106
107 return ret;
108}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index 05487cda84a8..39f85d627336 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27 26
28#include <subdev/bios.h> 27#include <subdev/bios.h>
29#include <subdev/bios/dcb.h> 28#include <subdev/bios/dcb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 97f0e9cd3d40..7b7bbc3e459e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27 26
28#include <subdev/bios.h> 27#include <subdev/bios.h>
29#include <subdev/bios/dcb.h> 28#include <subdev/bios/dcb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
index 5103e88d1877..e1500f77a56a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -23,98 +23,143 @@
23 */ 23 */
24 24
25#include <core/object.h> 25#include <core/object.h>
26#include <core/class.h> 26#include <core/client.h>
27#include <nvif/unpack.h>
28#include <nvif/class.h>
27 29
28#include <subdev/fb.h> 30#include <subdev/fb.h>
29#include <engine/dmaobj.h> 31#include <subdev/instmem.h>
32
33#include "priv.h"
30 34
31static int 35static int
32nouveau_dmaobj_ctor(struct nouveau_object *parent, 36nvkm_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent,
37 struct nouveau_gpuobj **pgpuobj)
38{
39 const struct nvkm_dmaeng_impl *impl = (void *)
40 nv_oclass(nv_object(dmaobj)->engine);
41 int ret = 0;
42
43 if (nv_object(dmaobj) == parent) { /* ctor bind */
44 if (nv_mclass(parent->parent) == NV_DEVICE) {
45 /* delayed, or no, binding */
46 return 0;
47 }
48 ret = impl->bind(dmaobj, parent, pgpuobj);
49 if (ret == 0)
50 nouveau_object_ref(NULL, &parent);
51 return ret;
52 }
53
54 return impl->bind(dmaobj, parent, pgpuobj);
55}
56
57int
58nvkm_dmaobj_create_(struct nouveau_object *parent,
33 struct nouveau_object *engine, 59 struct nouveau_object *engine,
34 struct nouveau_oclass *oclass, void *data, u32 size, 60 struct nouveau_oclass *oclass, void **pdata, u32 *psize,
35 struct nouveau_object **pobject) 61 int length, void **pobject)
36{ 62{
37 struct nouveau_dmaeng *dmaeng = (void *)engine; 63 union {
64 struct nv_dma_v0 v0;
65 } *args = *pdata;
66 struct nouveau_instmem *instmem = nouveau_instmem(parent);
67 struct nouveau_client *client = nouveau_client(parent);
68 struct nouveau_device *device = nv_device(parent);
69 struct nouveau_fb *pfb = nouveau_fb(parent);
38 struct nouveau_dmaobj *dmaobj; 70 struct nouveau_dmaobj *dmaobj;
39 struct nouveau_gpuobj *gpuobj; 71 void *data = *pdata;
40 struct nv_dma_class *args = data; 72 u32 size = *psize;
41 int ret; 73 int ret;
42 74
43 if (size < sizeof(*args)) 75 ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
44 return -EINVAL; 76 dmaobj = *pobject;
45
46 ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
47 *pobject = nv_object(dmaobj);
48 if (ret) 77 if (ret)
49 return ret; 78 return ret;
50 79
51 switch (args->flags & NV_DMA_TARGET_MASK) { 80 nv_ioctl(parent, "create dma size %d\n", *psize);
52 case NV_DMA_TARGET_VM: 81 if (nvif_unpack(args->v0, 0, 0, true)) {
82 nv_ioctl(parent, "create dma vers %d target %d access %d "
83 "start %016llx limit %016llx\n",
84 args->v0.version, args->v0.target, args->v0.access,
85 args->v0.start, args->v0.limit);
86 dmaobj->target = args->v0.target;
87 dmaobj->access = args->v0.access;
88 dmaobj->start = args->v0.start;
89 dmaobj->limit = args->v0.limit;
90 } else
91 return ret;
92
93 *pdata = data;
94 *psize = size;
95
96 if (dmaobj->start > dmaobj->limit)
97 return -EINVAL;
98
99 switch (dmaobj->target) {
100 case NV_DMA_V0_TARGET_VM:
53 dmaobj->target = NV_MEM_TARGET_VM; 101 dmaobj->target = NV_MEM_TARGET_VM;
54 break; 102 break;
55 case NV_DMA_TARGET_VRAM: 103 case NV_DMA_V0_TARGET_VRAM:
104 if (!client->super) {
105 if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
106 return -EACCES;
107 if (device->card_type >= NV_50)
108 return -EACCES;
109 }
56 dmaobj->target = NV_MEM_TARGET_VRAM; 110 dmaobj->target = NV_MEM_TARGET_VRAM;
57 break; 111 break;
58 case NV_DMA_TARGET_PCI: 112 case NV_DMA_V0_TARGET_PCI:
113 if (!client->super)
114 return -EACCES;
59 dmaobj->target = NV_MEM_TARGET_PCI; 115 dmaobj->target = NV_MEM_TARGET_PCI;
60 break; 116 break;
61 case NV_DMA_TARGET_PCI_US: 117 case NV_DMA_V0_TARGET_PCI_US:
62 case NV_DMA_TARGET_AGP: 118 case NV_DMA_V0_TARGET_AGP:
119 if (!client->super)
120 return -EACCES;
63 dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; 121 dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
64 break; 122 break;
65 default: 123 default:
66 return -EINVAL; 124 return -EINVAL;
67 } 125 }
68 126
69 switch (args->flags & NV_DMA_ACCESS_MASK) { 127 switch (dmaobj->access) {
70 case NV_DMA_ACCESS_VM: 128 case NV_DMA_V0_ACCESS_VM:
71 dmaobj->access = NV_MEM_ACCESS_VM; 129 dmaobj->access = NV_MEM_ACCESS_VM;
72 break; 130 break;
73 case NV_DMA_ACCESS_RD: 131 case NV_DMA_V0_ACCESS_RD:
74 dmaobj->access = NV_MEM_ACCESS_RO; 132 dmaobj->access = NV_MEM_ACCESS_RO;
75 break; 133 break;
76 case NV_DMA_ACCESS_WR: 134 case NV_DMA_V0_ACCESS_WR:
77 dmaobj->access = NV_MEM_ACCESS_WO; 135 dmaobj->access = NV_MEM_ACCESS_WO;
78 break; 136 break;
79 case NV_DMA_ACCESS_RDWR: 137 case NV_DMA_V0_ACCESS_RDWR:
80 dmaobj->access = NV_MEM_ACCESS_RW; 138 dmaobj->access = NV_MEM_ACCESS_RW;
81 break; 139 break;
82 default: 140 default:
83 return -EINVAL; 141 return -EINVAL;
84 } 142 }
85 143
86 dmaobj->start = args->start;
87 dmaobj->limit = args->limit;
88 dmaobj->conf0 = args->conf0;
89
90 switch (nv_mclass(parent)) {
91 case NV_DEVICE_CLASS:
92 /* delayed, or no, binding */
93 break;
94 default:
95 ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
96 if (ret == 0) {
97 nouveau_object_ref(NULL, pobject);
98 *pobject = nv_object(gpuobj);
99 }
100 break;
101 }
102
103 return ret; 144 return ret;
104} 145}
105 146
106static struct nouveau_ofuncs 147int
107nouveau_dmaobj_ofuncs = { 148_nvkm_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
108 .ctor = nouveau_dmaobj_ctor, 149 struct nouveau_oclass *oclass, void *data, u32 size,
109 .dtor = nouveau_object_destroy, 150 struct nouveau_object **pobject)
110 .init = nouveau_object_init, 151{
111 .fini = nouveau_object_fini, 152 const struct nvkm_dmaeng_impl *impl = (void *)oclass;
112}; 153 struct nouveau_dmaeng *dmaeng;
113 154 int ret;
114struct nouveau_oclass 155
115nouveau_dmaobj_sclass[] = { 156 ret = nouveau_engine_create(parent, engine, oclass, true, "DMAOBJ",
116 { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, 157 "dmaobj", &dmaeng);
117 { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, 158 *pobject = nv_object(dmaeng);
118 { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, 159 if (ret)
119 {} 160 return ret;
120}; 161
162 nv_engine(dmaeng)->sclass = impl->sclass;
163 dmaeng->bind = nvkm_dmaobj_bind;
164 return 0;
165}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
index 027d8217c0fa..20c9dbfe3b2e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -23,121 +23,143 @@
23 */ 23 */
24 24
25#include <core/gpuobj.h> 25#include <core/gpuobj.h>
26#include <core/class.h> 26#include <nvif/class.h>
27 27
28#include <subdev/fb.h> 28#include <subdev/fb.h>
29#include <subdev/vm/nv04.h> 29#include <subdev/vm/nv04.h>
30 30
31#include <engine/dmaobj.h> 31#include "priv.h"
32 32
33struct nv04_dmaeng_priv { 33struct nv04_dmaobj_priv {
34 struct nouveau_dmaeng base; 34 struct nouveau_dmaobj base;
35 bool clone;
36 u32 flags0;
37 u32 flags2;
35}; 38};
36 39
37static int 40static int
38nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, 41nv04_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
39 struct nouveau_object *parent, 42 struct nouveau_object *parent,
40 struct nouveau_dmaobj *dmaobj,
41 struct nouveau_gpuobj **pgpuobj) 43 struct nouveau_gpuobj **pgpuobj)
42{ 44{
43 struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng); 45 struct nv04_dmaobj_priv *priv = (void *)dmaobj;
44 struct nouveau_gpuobj *gpuobj; 46 struct nouveau_gpuobj *gpuobj;
45 u32 flags0 = nv_mclass(dmaobj); 47 u64 offset = priv->base.start & 0xfffff000;
46 u32 flags2 = 0x00000000; 48 u64 adjust = priv->base.start & 0x00000fff;
47 u64 offset = dmaobj->start & 0xfffff000; 49 u32 length = priv->base.limit - priv->base.start;
48 u64 adjust = dmaobj->start & 0x00000fff;
49 u32 length = dmaobj->limit - dmaobj->start;
50 int ret; 50 int ret;
51 51
52 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { 52 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
53 switch (nv_mclass(parent->parent)) { 53 switch (nv_mclass(parent->parent)) {
54 case NV03_CHANNEL_DMA_CLASS: 54 case NV03_CHANNEL_DMA:
55 case NV10_CHANNEL_DMA_CLASS: 55 case NV10_CHANNEL_DMA:
56 case NV17_CHANNEL_DMA_CLASS: 56 case NV17_CHANNEL_DMA:
57 case NV40_CHANNEL_DMA_CLASS: 57 case NV40_CHANNEL_DMA:
58 break; 58 break;
59 default: 59 default:
60 return -EINVAL; 60 return -EINVAL;
61 } 61 }
62 } 62 }
63 63
64 if (dmaobj->target == NV_MEM_TARGET_VM) { 64 if (priv->clone) {
65 if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) { 65 struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaobj);
66 struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0]; 66 struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
67 if (!dmaobj->start) 67 if (!dmaobj->start)
68 return nouveau_gpuobj_dup(parent, pgt, pgpuobj); 68 return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
69 offset = nv_ro32(pgt, 8 + (offset >> 10)); 69 offset = nv_ro32(pgt, 8 + (offset >> 10));
70 offset &= 0xfffff000; 70 offset &= 0xfffff000;
71 } 71 }
72
73 ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
74 *pgpuobj = gpuobj;
75 if (ret == 0) {
76 nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20));
77 nv_wo32(*pgpuobj, 0x04, length);
78 nv_wo32(*pgpuobj, 0x08, priv->flags2 | offset);
79 nv_wo32(*pgpuobj, 0x0c, priv->flags2 | offset);
80 }
81
82 return ret;
83}
84
85static int
86nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
87 struct nouveau_oclass *oclass, void *data, u32 size,
88 struct nouveau_object **pobject)
89{
90 struct nouveau_dmaeng *dmaeng = (void *)engine;
91 struct nv04_vmmgr_priv *vmm = nv04_vmmgr(engine);
92 struct nv04_dmaobj_priv *priv;
93 int ret;
94
95 ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
96 *pobject = nv_object(priv);
97 if (ret || (ret = -ENOSYS, size))
98 return ret;
72 99
73 dmaobj->target = NV_MEM_TARGET_PCI; 100 if (priv->base.target == NV_MEM_TARGET_VM) {
74 dmaobj->access = NV_MEM_ACCESS_RW; 101 if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass)
102 priv->clone = true;
103 priv->base.target = NV_MEM_TARGET_PCI;
104 priv->base.access = NV_MEM_ACCESS_RW;
75 } 105 }
76 106
77 switch (dmaobj->target) { 107 priv->flags0 = nv_mclass(priv);
108 switch (priv->base.target) {
78 case NV_MEM_TARGET_VRAM: 109 case NV_MEM_TARGET_VRAM:
79 flags0 |= 0x00003000; 110 priv->flags0 |= 0x00003000;
80 break; 111 break;
81 case NV_MEM_TARGET_PCI: 112 case NV_MEM_TARGET_PCI:
82 flags0 |= 0x00023000; 113 priv->flags0 |= 0x00023000;
83 break; 114 break;
84 case NV_MEM_TARGET_PCI_NOSNOOP: 115 case NV_MEM_TARGET_PCI_NOSNOOP:
85 flags0 |= 0x00033000; 116 priv->flags0 |= 0x00033000;
86 break; 117 break;
87 default: 118 default:
88 return -EINVAL; 119 return -EINVAL;
89 } 120 }
90 121
91 switch (dmaobj->access) { 122 switch (priv->base.access) {
92 case NV_MEM_ACCESS_RO: 123 case NV_MEM_ACCESS_RO:
93 flags0 |= 0x00004000; 124 priv->flags0 |= 0x00004000;
94 break; 125 break;
95 case NV_MEM_ACCESS_WO: 126 case NV_MEM_ACCESS_WO:
96 flags0 |= 0x00008000; 127 priv->flags0 |= 0x00008000;
97 case NV_MEM_ACCESS_RW: 128 case NV_MEM_ACCESS_RW:
98 flags2 |= 0x00000002; 129 priv->flags2 |= 0x00000002;
99 break; 130 break;
100 default: 131 default:
101 return -EINVAL; 132 return -EINVAL;
102 } 133 }
103 134
104 ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj); 135 return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
105 *pgpuobj = gpuobj;
106 if (ret == 0) {
107 nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
108 nv_wo32(*pgpuobj, 0x04, length);
109 nv_wo32(*pgpuobj, 0x08, flags2 | offset);
110 nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
111 }
112
113 return ret;
114} 136}
115 137
116static int 138static struct nouveau_ofuncs
117nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 139nv04_dmaobj_ofuncs = {
118 struct nouveau_oclass *oclass, void *data, u32 size, 140 .ctor = nv04_dmaobj_ctor,
119 struct nouveau_object **pobject) 141 .dtor = _nvkm_dmaobj_dtor,
120{ 142 .init = _nvkm_dmaobj_init,
121 struct nv04_dmaeng_priv *priv; 143 .fini = _nvkm_dmaobj_fini,
122 int ret; 144};
123
124 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
125 *pobject = nv_object(priv);
126 if (ret)
127 return ret;
128 145
129 nv_engine(priv)->sclass = nouveau_dmaobj_sclass; 146static struct nouveau_oclass
130 priv->base.bind = nv04_dmaobj_bind; 147nv04_dmaeng_sclass[] = {
131 return 0; 148 { NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
132} 149 { NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
150 { NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
151 {}
152};
133 153
134struct nouveau_oclass 154struct nouveau_oclass *
135nv04_dmaeng_oclass = { 155nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
136 .handle = NV_ENGINE(DMAOBJ, 0x04), 156 .base.handle = NV_ENGINE(DMAOBJ, 0x04),
137 .ofuncs = &(struct nouveau_ofuncs) { 157 .base.ofuncs = &(struct nouveau_ofuncs) {
138 .ctor = nv04_dmaeng_ctor, 158 .ctor = _nvkm_dmaeng_ctor,
139 .dtor = _nouveau_dmaeng_dtor, 159 .dtor = _nvkm_dmaeng_dtor,
140 .init = _nouveau_dmaeng_init, 160 .init = _nvkm_dmaeng_init,
141 .fini = _nouveau_dmaeng_fini, 161 .fini = _nvkm_dmaeng_fini,
142 }, 162 },
143}; 163 .sclass = nv04_dmaeng_sclass,
164 .bind = nv04_dmaobj_bind,
165}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
index 750183f7c057..a740ddba2ee2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -22,140 +22,176 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/client.h>
25#include <core/gpuobj.h> 26#include <core/gpuobj.h>
26#include <core/class.h> 27#include <nvif/unpack.h>
28#include <nvif/class.h>
27 29
28#include <subdev/fb.h> 30#include <subdev/fb.h>
29#include <engine/dmaobj.h>
30 31
31struct nv50_dmaeng_priv { 32#include "priv.h"
32 struct nouveau_dmaeng base; 33
34struct nv50_dmaobj_priv {
35 struct nouveau_dmaobj base;
36 u32 flags0;
37 u32 flags5;
33}; 38};
34 39
35static int 40static int
36nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng, 41nv50_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
37 struct nouveau_object *parent, 42 struct nouveau_object *parent,
38 struct nouveau_dmaobj *dmaobj,
39 struct nouveau_gpuobj **pgpuobj) 43 struct nouveau_gpuobj **pgpuobj)
40{ 44{
41 u32 flags0 = nv_mclass(dmaobj); 45 struct nv50_dmaobj_priv *priv = (void *)dmaobj;
42 u32 flags5 = 0x00000000;
43 int ret; 46 int ret;
44 47
45 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { 48 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
46 switch (nv_mclass(parent->parent)) { 49 switch (nv_mclass(parent->parent)) {
47 case NV50_CHANNEL_DMA_CLASS: 50 case NV40_CHANNEL_DMA:
48 case NV84_CHANNEL_DMA_CLASS: 51 case NV50_CHANNEL_GPFIFO:
49 case NV50_CHANNEL_IND_CLASS: 52 case G82_CHANNEL_GPFIFO:
50 case NV84_CHANNEL_IND_CLASS: 53 case NV50_DISP_CORE_CHANNEL_DMA:
51 case NV50_DISP_MAST_CLASS: 54 case G82_DISP_CORE_CHANNEL_DMA:
52 case NV84_DISP_MAST_CLASS: 55 case GT206_DISP_CORE_CHANNEL_DMA:
53 case NV94_DISP_MAST_CLASS: 56 case GT200_DISP_CORE_CHANNEL_DMA:
54 case NVA0_DISP_MAST_CLASS: 57 case GT214_DISP_CORE_CHANNEL_DMA:
55 case NVA3_DISP_MAST_CLASS: 58 case NV50_DISP_BASE_CHANNEL_DMA:
56 case NV50_DISP_SYNC_CLASS: 59 case G82_DISP_BASE_CHANNEL_DMA:
57 case NV84_DISP_SYNC_CLASS: 60 case GT200_DISP_BASE_CHANNEL_DMA:
58 case NV94_DISP_SYNC_CLASS: 61 case GT214_DISP_BASE_CHANNEL_DMA:
59 case NVA0_DISP_SYNC_CLASS: 62 case NV50_DISP_OVERLAY_CHANNEL_DMA:
60 case NVA3_DISP_SYNC_CLASS: 63 case G82_DISP_OVERLAY_CHANNEL_DMA:
61 case NV50_DISP_OVLY_CLASS: 64 case GT200_DISP_OVERLAY_CHANNEL_DMA:
62 case NV84_DISP_OVLY_CLASS: 65 case GT214_DISP_OVERLAY_CHANNEL_DMA:
63 case NV94_DISP_OVLY_CLASS:
64 case NVA0_DISP_OVLY_CLASS:
65 case NVA3_DISP_OVLY_CLASS:
66 break; 66 break;
67 default: 67 default:
68 return -EINVAL; 68 return -EINVAL;
69 } 69 }
70 } 70 }
71 71
72 if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) { 72 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
73 if (dmaobj->target == NV_MEM_TARGET_VM) { 73 if (ret == 0) {
74 dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM; 74 nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
75 dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM; 75 nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
76 dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM; 76 nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
77 dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM; 77 nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
78 upper_32_bits(priv->base.start));
79 nv_wo32(*pgpuobj, 0x10, 0x00000000);
80 nv_wo32(*pgpuobj, 0x14, priv->flags5);
81 }
82
83 return ret;
84}
85
86static int
87nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
88 struct nouveau_oclass *oclass, void *data, u32 size,
89 struct nouveau_object **pobject)
90{
91 struct nouveau_dmaeng *dmaeng = (void *)engine;
92 union {
93 struct nv50_dma_v0 v0;
94 } *args;
95 struct nv50_dmaobj_priv *priv;
96 u32 user, part, comp, kind;
97 int ret;
98
99 ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
100 *pobject = nv_object(priv);
101 if (ret)
102 return ret;
103 args = data;
104
105 nv_ioctl(parent, "create nv50 dma size %d\n", size);
106 if (nvif_unpack(args->v0, 0, 0, false)) {
107 nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
108 "comp %d kind %02x\n", args->v0.version,
109 args->v0.priv, args->v0.part, args->v0.comp,
110 args->v0.kind);
111 user = args->v0.priv;
112 part = args->v0.part;
113 comp = args->v0.comp;
114 kind = args->v0.kind;
115 } else
116 if (size == 0) {
117 if (priv->base.target != NV_MEM_TARGET_VM) {
118 user = NV50_DMA_V0_PRIV_US;
119 part = NV50_DMA_V0_PART_256;
120 comp = NV50_DMA_V0_COMP_NONE;
121 kind = NV50_DMA_V0_KIND_PITCH;
78 } else { 122 } else {
79 dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US; 123 user = NV50_DMA_V0_PRIV_VM;
80 dmaobj->conf0 |= NV50_DMA_CONF0_PART_256; 124 part = NV50_DMA_V0_PART_VM;
81 dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE; 125 comp = NV50_DMA_V0_COMP_VM;
82 dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR; 126 kind = NV50_DMA_V0_KIND_VM;
83 } 127 }
84 } 128 } else
129 return ret;
85 130
86 flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22; 131 if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
87 flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22; 132 return -EINVAL;
88 flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV); 133 priv->flags0 = (comp << 29) | (kind << 22) | (user << 20);
89 flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART); 134 priv->flags5 = (part << 16);
90 135
91 switch (dmaobj->target) { 136 switch (priv->base.target) {
92 case NV_MEM_TARGET_VM: 137 case NV_MEM_TARGET_VM:
93 flags0 |= 0x00000000; 138 priv->flags0 |= 0x00000000;
94 break; 139 break;
95 case NV_MEM_TARGET_VRAM: 140 case NV_MEM_TARGET_VRAM:
96 flags0 |= 0x00010000; 141 priv->flags0 |= 0x00010000;
97 break; 142 break;
98 case NV_MEM_TARGET_PCI: 143 case NV_MEM_TARGET_PCI:
99 flags0 |= 0x00020000; 144 priv->flags0 |= 0x00020000;
100 break; 145 break;
101 case NV_MEM_TARGET_PCI_NOSNOOP: 146 case NV_MEM_TARGET_PCI_NOSNOOP:
102 flags0 |= 0x00030000; 147 priv->flags0 |= 0x00030000;
103 break; 148 break;
104 default: 149 default:
105 return -EINVAL; 150 return -EINVAL;
106 } 151 }
107 152
108 switch (dmaobj->access) { 153 switch (priv->base.access) {
109 case NV_MEM_ACCESS_VM: 154 case NV_MEM_ACCESS_VM:
110 break; 155 break;
111 case NV_MEM_ACCESS_RO: 156 case NV_MEM_ACCESS_RO:
112 flags0 |= 0x00040000; 157 priv->flags0 |= 0x00040000;
113 break; 158 break;
114 case NV_MEM_ACCESS_WO: 159 case NV_MEM_ACCESS_WO:
115 case NV_MEM_ACCESS_RW: 160 case NV_MEM_ACCESS_RW:
116 flags0 |= 0x00080000; 161 priv->flags0 |= 0x00080000;
117 break; 162 break;
163 default:
164 return -EINVAL;
118 } 165 }
119 166
120 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); 167 return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
121 if (ret == 0) {
122 nv_wo32(*pgpuobj, 0x00, flags0);
123 nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
124 nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
125 nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
126 upper_32_bits(dmaobj->start));
127 nv_wo32(*pgpuobj, 0x10, 0x00000000);
128 nv_wo32(*pgpuobj, 0x14, flags5);
129 }
130
131 return ret;
132} 168}
133 169
134static int 170static struct nouveau_ofuncs
135nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 171nv50_dmaobj_ofuncs = {
136 struct nouveau_oclass *oclass, void *data, u32 size, 172 .ctor = nv50_dmaobj_ctor,
137 struct nouveau_object **pobject) 173 .dtor = _nvkm_dmaobj_dtor,
138{ 174 .init = _nvkm_dmaobj_init,
139 struct nv50_dmaeng_priv *priv; 175 .fini = _nvkm_dmaobj_fini,
140 int ret; 176};
141
142 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
143 *pobject = nv_object(priv);
144 if (ret)
145 return ret;
146 177
147 nv_engine(priv)->sclass = nouveau_dmaobj_sclass; 178static struct nouveau_oclass
148 priv->base.bind = nv50_dmaobj_bind; 179nv50_dmaeng_sclass[] = {
149 return 0; 180 { NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
150} 181 { NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
182 { NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
183 {}
184};
151 185
152struct nouveau_oclass 186struct nouveau_oclass *
153nv50_dmaeng_oclass = { 187nv50_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
154 .handle = NV_ENGINE(DMAOBJ, 0x50), 188 .base.handle = NV_ENGINE(DMAOBJ, 0x50),
155 .ofuncs = &(struct nouveau_ofuncs) { 189 .base.ofuncs = &(struct nouveau_ofuncs) {
156 .ctor = nv50_dmaeng_ctor, 190 .ctor = _nvkm_dmaeng_ctor,
157 .dtor = _nouveau_dmaeng_dtor, 191 .dtor = _nvkm_dmaeng_dtor,
158 .init = _nouveau_dmaeng_init, 192 .init = _nvkm_dmaeng_init,
159 .fini = _nouveau_dmaeng_fini, 193 .fini = _nvkm_dmaeng_fini,
160 }, 194 },
161}; 195 .sclass = nv50_dmaeng_sclass,
196 .bind = nv50_dmaobj_bind,
197}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
index cd3970d03b80..88ec33b20048 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -22,32 +22,35 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/client.h>
25#include <core/device.h> 26#include <core/device.h>
26#include <core/gpuobj.h> 27#include <core/gpuobj.h>
27#include <core/class.h> 28#include <nvif/unpack.h>
29#include <nvif/class.h>
28 30
29#include <subdev/fb.h> 31#include <subdev/fb.h>
30#include <engine/dmaobj.h>
31 32
32struct nvc0_dmaeng_priv { 33#include "priv.h"
33 struct nouveau_dmaeng base; 34
35struct nvc0_dmaobj_priv {
36 struct nouveau_dmaobj base;
37 u32 flags0;
38 u32 flags5;
34}; 39};
35 40
36static int 41static int
37nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, 42nvc0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
38 struct nouveau_object *parent, 43 struct nouveau_object *parent,
39 struct nouveau_dmaobj *dmaobj,
40 struct nouveau_gpuobj **pgpuobj) 44 struct nouveau_gpuobj **pgpuobj)
41{ 45{
42 u32 flags0 = nv_mclass(dmaobj); 46 struct nvc0_dmaobj_priv *priv = (void *)dmaobj;
43 u32 flags5 = 0x00000000;
44 int ret; 47 int ret;
45 48
46 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { 49 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
47 switch (nv_mclass(parent->parent)) { 50 switch (nv_mclass(parent->parent)) {
48 case NVA3_DISP_MAST_CLASS: 51 case GT214_DISP_CORE_CHANNEL_DMA:
49 case NVA3_DISP_SYNC_CLASS: 52 case GT214_DISP_BASE_CHANNEL_DMA:
50 case NVA3_DISP_OVLY_CLASS: 53 case GT214_DISP_OVERLAY_CHANNEL_DMA:
51 break; 54 break;
52 default: 55 default:
53 return -EINVAL; 56 return -EINVAL;
@@ -55,89 +58,122 @@ nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
55 } else 58 } else
56 return 0; 59 return 0;
57 60
58 if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) { 61 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
59 if (dmaobj->target == NV_MEM_TARGET_VM) { 62 if (ret == 0) {
60 dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM; 63 nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
61 dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM; 64 nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
65 nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
66 nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
67 upper_32_bits(priv->base.start));
68 nv_wo32(*pgpuobj, 0x10, 0x00000000);
69 nv_wo32(*pgpuobj, 0x14, priv->flags5);
70 }
71
72 return ret;
73}
74
75static int
76nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
77 struct nouveau_oclass *oclass, void *data, u32 size,
78 struct nouveau_object **pobject)
79{
80 struct nouveau_dmaeng *dmaeng = (void *)engine;
81 union {
82 struct gf100_dma_v0 v0;
83 } *args;
84 struct nvc0_dmaobj_priv *priv;
85 u32 kind, user, unkn;
86 int ret;
87
88 ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92 args = data;
93
94 nv_ioctl(parent, "create gf100 dma size %d\n", size);
95 if (nvif_unpack(args->v0, 0, 0, false)) {
96 nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n",
97 args->v0.version, args->v0.priv, args->v0.kind);
98 kind = args->v0.kind;
99 user = args->v0.priv;
100 unkn = 0;
101 } else
102 if (size == 0) {
103 if (priv->base.target != NV_MEM_TARGET_VM) {
104 kind = GF100_DMA_V0_KIND_PITCH;
105 user = GF100_DMA_V0_PRIV_US;
106 unkn = 2;
62 } else { 107 } else {
63 dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US; 108 kind = GF100_DMA_V0_KIND_VM;
64 dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR; 109 user = GF100_DMA_V0_PRIV_VM;
65 dmaobj->conf0 |= 0x00020000; 110 unkn = 0;
66 } 111 }
67 } 112 } else
113 return ret;
68 114
69 flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22; 115 if (user > 2)
70 flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV); 116 return -EINVAL;
71 flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN); 117 priv->flags0 |= (kind << 22) | (user << 20);
118 priv->flags5 |= (unkn << 16);
72 119
73 switch (dmaobj->target) { 120 switch (priv->base.target) {
74 case NV_MEM_TARGET_VM: 121 case NV_MEM_TARGET_VM:
75 flags0 |= 0x00000000; 122 priv->flags0 |= 0x00000000;
76 break; 123 break;
77 case NV_MEM_TARGET_VRAM: 124 case NV_MEM_TARGET_VRAM:
78 flags0 |= 0x00010000; 125 priv->flags0 |= 0x00010000;
79 break; 126 break;
80 case NV_MEM_TARGET_PCI: 127 case NV_MEM_TARGET_PCI:
81 flags0 |= 0x00020000; 128 priv->flags0 |= 0x00020000;
82 break; 129 break;
83 case NV_MEM_TARGET_PCI_NOSNOOP: 130 case NV_MEM_TARGET_PCI_NOSNOOP:
84 flags0 |= 0x00030000; 131 priv->flags0 |= 0x00030000;
85 break; 132 break;
86 default: 133 default:
87 return -EINVAL; 134 return -EINVAL;
88 } 135 }
89 136
90 switch (dmaobj->access) { 137 switch (priv->base.access) {
91 case NV_MEM_ACCESS_VM: 138 case NV_MEM_ACCESS_VM:
92 break; 139 break;
93 case NV_MEM_ACCESS_RO: 140 case NV_MEM_ACCESS_RO:
94 flags0 |= 0x00040000; 141 priv->flags0 |= 0x00040000;
95 break; 142 break;
96 case NV_MEM_ACCESS_WO: 143 case NV_MEM_ACCESS_WO:
97 case NV_MEM_ACCESS_RW: 144 case NV_MEM_ACCESS_RW:
98 flags0 |= 0x00080000; 145 priv->flags0 |= 0x00080000;
99 break; 146 break;
100 } 147 }
101 148
102 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); 149 return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
103 if (ret == 0) {
104 nv_wo32(*pgpuobj, 0x00, flags0);
105 nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
106 nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
107 nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
108 upper_32_bits(dmaobj->start));
109 nv_wo32(*pgpuobj, 0x10, 0x00000000);
110 nv_wo32(*pgpuobj, 0x14, flags5);
111 }
112
113 return ret;
114} 150}
115 151
116static int 152static struct nouveau_ofuncs
117nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 153nvc0_dmaobj_ofuncs = {
118 struct nouveau_oclass *oclass, void *data, u32 size, 154 .ctor = nvc0_dmaobj_ctor,
119 struct nouveau_object **pobject) 155 .dtor = _nvkm_dmaobj_dtor,
120{ 156 .init = _nvkm_dmaobj_init,
121 struct nvc0_dmaeng_priv *priv; 157 .fini = _nvkm_dmaobj_fini,
122 int ret; 158};
123
124 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
125 *pobject = nv_object(priv);
126 if (ret)
127 return ret;
128 159
129 nv_engine(priv)->sclass = nouveau_dmaobj_sclass; 160static struct nouveau_oclass
130 priv->base.bind = nvc0_dmaobj_bind; 161nvc0_dmaeng_sclass[] = {
131 return 0; 162 { NV_DMA_FROM_MEMORY, &nvc0_dmaobj_ofuncs },
132} 163 { NV_DMA_TO_MEMORY, &nvc0_dmaobj_ofuncs },
164 { NV_DMA_IN_MEMORY, &nvc0_dmaobj_ofuncs },
165 {}
166};
133 167
134struct nouveau_oclass 168struct nouveau_oclass *
135nvc0_dmaeng_oclass = { 169nvc0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
136 .handle = NV_ENGINE(DMAOBJ, 0xc0), 170 .base.handle = NV_ENGINE(DMAOBJ, 0xc0),
137 .ofuncs = &(struct nouveau_ofuncs) { 171 .base.ofuncs = &(struct nouveau_ofuncs) {
138 .ctor = nvc0_dmaeng_ctor, 172 .ctor = _nvkm_dmaeng_ctor,
139 .dtor = _nouveau_dmaeng_dtor, 173 .dtor = _nvkm_dmaeng_dtor,
140 .init = _nouveau_dmaeng_init, 174 .init = _nvkm_dmaeng_init,
141 .fini = _nouveau_dmaeng_fini, 175 .fini = _nvkm_dmaeng_fini,
142 }, 176 },
143}; 177 .sclass = nvc0_dmaeng_sclass,
178 .bind = nvc0_dmaobj_bind,
179}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index 1cfb3bb90131..3fc4f0b0eaca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -22,40 +22,40 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/client.h>
25#include <core/device.h> 26#include <core/device.h>
26#include <core/gpuobj.h> 27#include <core/gpuobj.h>
27#include <core/class.h> 28#include <nvif/unpack.h>
29#include <nvif/class.h>
28 30
29#include <subdev/fb.h> 31#include <subdev/fb.h>
30#include <engine/dmaobj.h>
31 32
32struct nvd0_dmaeng_priv { 33#include "priv.h"
33 struct nouveau_dmaeng base; 34
35struct nvd0_dmaobj_priv {
36 struct nouveau_dmaobj base;
37 u32 flags0;
34}; 38};
35 39
36static int 40static int
37nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, 41nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
38 struct nouveau_object *parent, 42 struct nouveau_object *parent,
39 struct nouveau_dmaobj *dmaobj,
40 struct nouveau_gpuobj **pgpuobj) 43 struct nouveau_gpuobj **pgpuobj)
41{ 44{
42 u32 flags0 = 0x00000000; 45 struct nvd0_dmaobj_priv *priv = (void *)dmaobj;
43 int ret; 46 int ret;
44 47
45 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { 48 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
46 switch (nv_mclass(parent->parent)) { 49 switch (nv_mclass(parent->parent)) {
47 case NVD0_DISP_MAST_CLASS: 50 case GF110_DISP_CORE_CHANNEL_DMA:
48 case NVD0_DISP_SYNC_CLASS: 51 case GK104_DISP_CORE_CHANNEL_DMA:
49 case NVD0_DISP_OVLY_CLASS: 52 case GK110_DISP_CORE_CHANNEL_DMA:
50 case NVE0_DISP_MAST_CLASS: 53 case GM107_DISP_CORE_CHANNEL_DMA:
51 case NVE0_DISP_SYNC_CLASS: 54 case GF110_DISP_BASE_CHANNEL_DMA:
52 case NVE0_DISP_OVLY_CLASS: 55 case GK104_DISP_BASE_CHANNEL_DMA:
53 case NVF0_DISP_MAST_CLASS: 56 case GK110_DISP_BASE_CHANNEL_DMA:
54 case NVF0_DISP_SYNC_CLASS: 57 case GF110_DISP_OVERLAY_CONTROL_DMA:
55 case NVF0_DISP_OVLY_CLASS: 58 case GK104_DISP_OVERLAY_CONTROL_DMA:
56 case GM107_DISP_MAST_CLASS:
57 case GM107_DISP_SYNC_CLASS:
58 case GM107_DISP_OVLY_CLASS:
59 break; 59 break;
60 default: 60 default:
61 return -EINVAL; 61 return -EINVAL;
@@ -63,33 +63,11 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
63 } else 63 } else
64 return 0; 64 return 0;
65 65
66 if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
67 if (dmaobj->target == NV_MEM_TARGET_VM) {
68 dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
69 dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
70 } else {
71 dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
72 dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
73 }
74 }
75
76 flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
77 flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
78
79 switch (dmaobj->target) {
80 case NV_MEM_TARGET_VRAM:
81 flags0 |= 0x00000009;
82 break;
83 default:
84 return -EINVAL;
85 break;
86 }
87
88 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); 66 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
89 if (ret == 0) { 67 if (ret == 0) {
90 nv_wo32(*pgpuobj, 0x00, flags0); 68 nv_wo32(*pgpuobj, 0x00, priv->flags0);
91 nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8); 69 nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8);
92 nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8); 70 nv_wo32(*pgpuobj, 0x08, priv->base.limit >> 8);
93 nv_wo32(*pgpuobj, 0x0c, 0x00000000); 71 nv_wo32(*pgpuobj, 0x0c, 0x00000000);
94 nv_wo32(*pgpuobj, 0x10, 0x00000000); 72 nv_wo32(*pgpuobj, 0x10, 0x00000000);
95 nv_wo32(*pgpuobj, 0x14, 0x00000000); 73 nv_wo32(*pgpuobj, 0x14, 0x00000000);
@@ -99,30 +77,91 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
99} 77}
100 78
101static int 79static int
102nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 80nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
103 struct nouveau_oclass *oclass, void *data, u32 size, 81 struct nouveau_oclass *oclass, void *data, u32 size,
104 struct nouveau_object **pobject) 82 struct nouveau_object **pobject)
105{ 83{
106 struct nvd0_dmaeng_priv *priv; 84 struct nouveau_dmaeng *dmaeng = (void *)engine;
85 union {
86 struct gf110_dma_v0 v0;
87 } *args;
88 struct nvd0_dmaobj_priv *priv;
89 u32 kind, page;
107 int ret; 90 int ret;
108 91
109 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); 92 ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
110 *pobject = nv_object(priv); 93 *pobject = nv_object(priv);
111 if (ret) 94 if (ret)
112 return ret; 95 return ret;
96 args = data;
113 97
114 nv_engine(priv)->sclass = nouveau_dmaobj_sclass; 98 nv_ioctl(parent, "create gf110 dma size %d\n", size);
115 priv->base.bind = nvd0_dmaobj_bind; 99 if (nvif_unpack(args->v0, 0, 0, false)) {
116 return 0; 100 nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n",
101 args->v0.version, args->v0.page, args->v0.kind);
102 kind = args->v0.kind;
103 page = args->v0.page;
104 } else
105 if (size == 0) {
106 if (priv->base.target != NV_MEM_TARGET_VM) {
107 kind = GF110_DMA_V0_KIND_PITCH;
108 page = GF110_DMA_V0_PAGE_SP;
109 } else {
110 kind = GF110_DMA_V0_KIND_VM;
111 page = GF110_DMA_V0_PAGE_LP;
112 }
113 } else
114 return ret;
115
116 if (page > 1)
117 return -EINVAL;
118 priv->flags0 = (kind << 20) | (page << 6);
119
120 switch (priv->base.target) {
121 case NV_MEM_TARGET_VRAM:
122 priv->flags0 |= 0x00000009;
123 break;
124 case NV_MEM_TARGET_VM:
125 case NV_MEM_TARGET_PCI:
126 case NV_MEM_TARGET_PCI_NOSNOOP:
127 /* XXX: don't currently know how to construct a real one
128 * of these. we only use them to represent pushbufs
129 * on these chipsets, and the classes that use them
130 * deal with the target themselves.
131 */
132 break;
133 default:
134 return -EINVAL;
135 }
136
137 return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
117} 138}
118 139
119struct nouveau_oclass 140static struct nouveau_ofuncs
120nvd0_dmaeng_oclass = { 141nvd0_dmaobj_ofuncs = {
121 .handle = NV_ENGINE(DMAOBJ, 0xd0), 142 .ctor = nvd0_dmaobj_ctor,
122 .ofuncs = &(struct nouveau_ofuncs) { 143 .dtor = _nvkm_dmaobj_dtor,
123 .ctor = nvd0_dmaeng_ctor, 144 .init = _nvkm_dmaobj_init,
124 .dtor = _nouveau_dmaeng_dtor, 145 .fini = _nvkm_dmaobj_fini,
125 .init = _nouveau_dmaeng_init,
126 .fini = _nouveau_dmaeng_fini,
127 },
128}; 146};
147
148static struct nouveau_oclass
149nvd0_dmaeng_sclass[] = {
150 { NV_DMA_FROM_MEMORY, &nvd0_dmaobj_ofuncs },
151 { NV_DMA_TO_MEMORY, &nvd0_dmaobj_ofuncs },
152 { NV_DMA_IN_MEMORY, &nvd0_dmaobj_ofuncs },
153 {}
154};
155
156struct nouveau_oclass *
157nvd0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
158 .base.handle = NV_ENGINE(DMAOBJ, 0xd0),
159 .base.ofuncs = &(struct nouveau_ofuncs) {
160 .ctor = _nvkm_dmaeng_ctor,
161 .dtor = _nvkm_dmaeng_dtor,
162 .init = _nvkm_dmaeng_init,
163 .fini = _nvkm_dmaeng_fini,
164 },
165 .sclass = nvd0_dmaeng_sclass,
166 .bind = nvd0_dmaobj_bind,
167}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h
new file mode 100644
index 000000000000..36f743866937
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h
@@ -0,0 +1,30 @@
1#ifndef __NVKM_DMAOBJ_PRIV_H__
2#define __NVKM_DMAOBJ_PRIV_H__
3
4#include <engine/dmaobj.h>
5
6#define nvkm_dmaobj_create(p,e,c,pa,sa,d) \
7 nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
8
9int nvkm_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
10 struct nouveau_oclass *, void **, u32 *,
11 int, void **);
12#define _nvkm_dmaobj_dtor nouveau_object_destroy
13#define _nvkm_dmaobj_init nouveau_object_init
14#define _nvkm_dmaobj_fini nouveau_object_fini
15
16int _nvkm_dmaeng_ctor(struct nouveau_object *, struct nouveau_object *,
17 struct nouveau_oclass *, void *, u32,
18 struct nouveau_object **);
19#define _nvkm_dmaeng_dtor _nouveau_engine_dtor
20#define _nvkm_dmaeng_init _nouveau_engine_init
21#define _nvkm_dmaeng_fini _nouveau_engine_fini
22
23struct nvkm_dmaeng_impl {
24 struct nouveau_oclass base;
25 struct nouveau_oclass *sclass;
26 int (*bind)(struct nouveau_dmaobj *, struct nouveau_object *,
27 struct nouveau_gpuobj **);
28};
29
30#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index 56ed3d73bf8e..0f999fc45ab9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -26,11 +26,30 @@
26#include <core/object.h> 26#include <core/object.h>
27#include <core/handle.h> 27#include <core/handle.h>
28#include <core/event.h> 28#include <core/event.h>
29#include <core/class.h> 29#include <nvif/unpack.h>
30#include <nvif/class.h>
31#include <nvif/event.h>
30 32
31#include <engine/dmaobj.h> 33#include <engine/dmaobj.h>
32#include <engine/fifo.h> 34#include <engine/fifo.h>
33 35
36static int
37nouveau_fifo_event_ctor(void *data, u32 size, struct nvkm_notify *notify)
38{
39 if (size == 0) {
40 notify->size = 0;
41 notify->types = 1;
42 notify->index = 0;
43 return 0;
44 }
45 return -ENOSYS;
46}
47
48static const struct nvkm_event_func
49nouveau_fifo_event_func = {
50 .ctor = nouveau_fifo_event_ctor,
51};
52
34int 53int
35nouveau_fifo_channel_create_(struct nouveau_object *parent, 54nouveau_fifo_channel_create_(struct nouveau_object *parent,
36 struct nouveau_object *engine, 55 struct nouveau_object *engine,
@@ -59,14 +78,14 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
59 78
60 dmaeng = (void *)chan->pushdma->base.engine; 79 dmaeng = (void *)chan->pushdma->base.engine;
61 switch (chan->pushdma->base.oclass->handle) { 80 switch (chan->pushdma->base.oclass->handle) {
62 case NV_DMA_FROM_MEMORY_CLASS: 81 case NV_DMA_FROM_MEMORY:
63 case NV_DMA_IN_MEMORY_CLASS: 82 case NV_DMA_IN_MEMORY:
64 break; 83 break;
65 default: 84 default:
66 return -EINVAL; 85 return -EINVAL;
67 } 86 }
68 87
69 ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu); 88 ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu);
70 if (ret) 89 if (ret)
71 return ret; 90 return ret;
72 91
@@ -85,15 +104,10 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
85 return -ENOSPC; 104 return -ENOSPC;
86 } 105 }
87 106
88 /* map fifo control registers */ 107 chan->addr = nv_device_resource_start(device, bar) +
89 chan->user = ioremap(nv_device_resource_start(device, bar) + addr + 108 addr + size * chan->chid;
90 (chan->chid * size), size);
91 if (!chan->user)
92 return -EFAULT;
93
94 nouveau_event_trigger(priv->cevent, 1, 0);
95
96 chan->size = size; 109 chan->size = size;
110 nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
97 return 0; 111 return 0;
98} 112}
99 113
@@ -103,7 +117,8 @@ nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
103 struct nouveau_fifo *priv = (void *)nv_object(chan)->engine; 117 struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
104 unsigned long flags; 118 unsigned long flags;
105 119
106 iounmap(chan->user); 120 if (chan->user)
121 iounmap(chan->user);
107 122
108 spin_lock_irqsave(&priv->lock, flags); 123 spin_lock_irqsave(&priv->lock, flags);
109 priv->channel[chan->chid] = NULL; 124 priv->channel[chan->chid] = NULL;
@@ -121,10 +136,24 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
121 nouveau_fifo_channel_destroy(chan); 136 nouveau_fifo_channel_destroy(chan);
122} 137}
123 138
139int
140_nouveau_fifo_channel_map(struct nouveau_object *object, u64 *addr, u32 *size)
141{
142 struct nouveau_fifo_chan *chan = (void *)object;
143 *addr = chan->addr;
144 *size = chan->size;
145 return 0;
146}
147
124u32 148u32
125_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr) 149_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
126{ 150{
127 struct nouveau_fifo_chan *chan = (void *)object; 151 struct nouveau_fifo_chan *chan = (void *)object;
152 if (unlikely(!chan->user)) {
153 chan->user = ioremap(chan->addr, chan->size);
154 if (WARN_ON_ONCE(chan->user == NULL))
155 return 0;
156 }
128 return ioread32_native(chan->user + addr); 157 return ioread32_native(chan->user + addr);
129} 158}
130 159
@@ -132,9 +161,57 @@ void
132_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data) 161_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
133{ 162{
134 struct nouveau_fifo_chan *chan = (void *)object; 163 struct nouveau_fifo_chan *chan = (void *)object;
164 if (unlikely(!chan->user)) {
165 chan->user = ioremap(chan->addr, chan->size);
166 if (WARN_ON_ONCE(chan->user == NULL))
167 return;
168 }
135 iowrite32_native(data, chan->user + addr); 169 iowrite32_native(data, chan->user + addr);
136} 170}
137 171
172int
173nouveau_fifo_uevent_ctor(void *data, u32 size, struct nvkm_notify *notify)
174{
175 union {
176 struct nvif_notify_uevent_req none;
177 } *req = data;
178 int ret;
179
180 if (nvif_unvers(req->none)) {
181 notify->size = sizeof(struct nvif_notify_uevent_rep);
182 notify->types = 1;
183 notify->index = 0;
184 }
185
186 return ret;
187}
188
189void
190nouveau_fifo_uevent(struct nouveau_fifo *fifo)
191{
192 struct nvif_notify_uevent_rep rep = {
193 };
194 nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
195}
196
197int
198_nouveau_fifo_channel_ntfy(struct nouveau_object *object, u32 type,
199 struct nvkm_event **event)
200{
201 struct nouveau_fifo *fifo = (void *)object->engine;
202 switch (type) {
203 case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
204 if (nv_mclass(object) >= G82_CHANNEL_DMA) {
205 *event = &fifo->uevent;
206 return 0;
207 }
208 break;
209 default:
210 break;
211 }
212 return -EINVAL;
213}
214
138static int 215static int
139nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object) 216nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
140{ 217{
@@ -168,8 +245,8 @@ void
168nouveau_fifo_destroy(struct nouveau_fifo *priv) 245nouveau_fifo_destroy(struct nouveau_fifo *priv)
169{ 246{
170 kfree(priv->channel); 247 kfree(priv->channel);
171 nouveau_event_destroy(&priv->uevent); 248 nvkm_event_fini(&priv->uevent);
172 nouveau_event_destroy(&priv->cevent); 249 nvkm_event_fini(&priv->cevent);
173 nouveau_engine_destroy(&priv->base); 250 nouveau_engine_destroy(&priv->base);
174} 251}
175 252
@@ -194,11 +271,7 @@ nouveau_fifo_create_(struct nouveau_object *parent,
194 if (!priv->channel) 271 if (!priv->channel)
195 return -ENOMEM; 272 return -ENOMEM;
196 273
197 ret = nouveau_event_create(1, 1, &priv->cevent); 274 ret = nvkm_event_init(&nouveau_fifo_event_func, 1, 1, &priv->cevent);
198 if (ret)
199 return ret;
200
201 ret = nouveau_event_create(1, 1, &priv->uevent);
202 if (ret) 275 if (ret)
203 return ret; 276 return ret;
204 277
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index c61b16a63884..5ae6a43893b5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -22,8 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27#include <core/engctx.h> 28#include <core/engctx.h>
28#include <core/namedb.h> 29#include <core/namedb.h>
29#include <core/handle.h> 30#include <core/handle.h>
@@ -117,16 +118,23 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
117 struct nouveau_oclass *oclass, void *data, u32 size, 118 struct nouveau_oclass *oclass, void *data, u32 size,
118 struct nouveau_object **pobject) 119 struct nouveau_object **pobject)
119{ 120{
121 union {
122 struct nv03_channel_dma_v0 v0;
123 } *args = data;
120 struct nv04_fifo_priv *priv = (void *)engine; 124 struct nv04_fifo_priv *priv = (void *)engine;
121 struct nv04_fifo_chan *chan; 125 struct nv04_fifo_chan *chan;
122 struct nv03_channel_dma_class *args = data;
123 int ret; 126 int ret;
124 127
125 if (size < sizeof(*args)) 128 nv_ioctl(parent, "create channel dma size %d\n", size);
126 return -EINVAL; 129 if (nvif_unpack(args->v0, 0, 0, false)) {
130 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
131 "offset %016llx\n", args->v0.version,
132 args->v0.pushbuf, args->v0.offset);
133 } else
134 return ret;
127 135
128 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 136 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
129 0x10000, args->pushbuf, 137 0x10000, args->v0.pushbuf,
130 (1ULL << NVDEV_ENGINE_DMAOBJ) | 138 (1ULL << NVDEV_ENGINE_DMAOBJ) |
131 (1ULL << NVDEV_ENGINE_SW) | 139 (1ULL << NVDEV_ENGINE_SW) |
132 (1ULL << NVDEV_ENGINE_GR), &chan); 140 (1ULL << NVDEV_ENGINE_GR), &chan);
@@ -134,13 +142,15 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
134 if (ret) 142 if (ret)
135 return ret; 143 return ret;
136 144
145 args->v0.chid = chan->base.chid;
146
137 nv_parent(chan)->object_attach = nv04_fifo_object_attach; 147 nv_parent(chan)->object_attach = nv04_fifo_object_attach;
138 nv_parent(chan)->object_detach = nv04_fifo_object_detach; 148 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
139 nv_parent(chan)->context_attach = nv04_fifo_context_attach; 149 nv_parent(chan)->context_attach = nv04_fifo_context_attach;
140 chan->ramfc = chan->base.chid * 32; 150 chan->ramfc = chan->base.chid * 32;
141 151
142 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); 152 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
143 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); 153 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
144 nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4); 154 nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
145 nv_wo32(priv->ramfc, chan->ramfc + 0x10, 155 nv_wo32(priv->ramfc, chan->ramfc + 0x10,
146 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 156 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -242,13 +252,15 @@ nv04_fifo_ofuncs = {
242 .dtor = nv04_fifo_chan_dtor, 252 .dtor = nv04_fifo_chan_dtor,
243 .init = nv04_fifo_chan_init, 253 .init = nv04_fifo_chan_init,
244 .fini = nv04_fifo_chan_fini, 254 .fini = nv04_fifo_chan_fini,
255 .map = _nouveau_fifo_channel_map,
245 .rd32 = _nouveau_fifo_channel_rd32, 256 .rd32 = _nouveau_fifo_channel_rd32,
246 .wr32 = _nouveau_fifo_channel_wr32, 257 .wr32 = _nouveau_fifo_channel_wr32,
258 .ntfy = _nouveau_fifo_channel_ntfy
247}; 259};
248 260
249static struct nouveau_oclass 261static struct nouveau_oclass
250nv04_fifo_sclass[] = { 262nv04_fifo_sclass[] = {
251 { NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs }, 263 { NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
252 {} 264 {}
253}; 265};
254 266
@@ -539,7 +551,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
539 } 551 }
540 552
541 if (status & 0x40000000) { 553 if (status & 0x40000000) {
542 nouveau_event_trigger(priv->base.uevent, 1, 0); 554 nouveau_fifo_uevent(&priv->base);
543 nv_wr32(priv, 0x002100, 0x40000000); 555 nv_wr32(priv, 0x002100, 0x40000000);
544 status &= ~0x40000000; 556 status &= ~0x40000000;
545 } 557 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 571a22aa1ae5..2a32add51c81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -22,8 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27#include <core/engctx.h> 28#include <core/engctx.h>
28#include <core/ramht.h> 29#include <core/ramht.h>
29 30
@@ -59,16 +60,23 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
59 struct nouveau_oclass *oclass, void *data, u32 size, 60 struct nouveau_oclass *oclass, void *data, u32 size,
60 struct nouveau_object **pobject) 61 struct nouveau_object **pobject)
61{ 62{
63 union {
64 struct nv03_channel_dma_v0 v0;
65 } *args = data;
62 struct nv04_fifo_priv *priv = (void *)engine; 66 struct nv04_fifo_priv *priv = (void *)engine;
63 struct nv04_fifo_chan *chan; 67 struct nv04_fifo_chan *chan;
64 struct nv03_channel_dma_class *args = data;
65 int ret; 68 int ret;
66 69
67 if (size < sizeof(*args)) 70 nv_ioctl(parent, "create channel dma size %d\n", size);
68 return -EINVAL; 71 if (nvif_unpack(args->v0, 0, 0, false)) {
72 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
73 "offset %016llx\n", args->v0.version,
74 args->v0.pushbuf, args->v0.offset);
75 } else
76 return ret;
69 77
70 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 78 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
71 0x10000, args->pushbuf, 79 0x10000, args->v0.pushbuf,
72 (1ULL << NVDEV_ENGINE_DMAOBJ) | 80 (1ULL << NVDEV_ENGINE_DMAOBJ) |
73 (1ULL << NVDEV_ENGINE_SW) | 81 (1ULL << NVDEV_ENGINE_SW) |
74 (1ULL << NVDEV_ENGINE_GR), &chan); 82 (1ULL << NVDEV_ENGINE_GR), &chan);
@@ -76,13 +84,15 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
76 if (ret) 84 if (ret)
77 return ret; 85 return ret;
78 86
87 args->v0.chid = chan->base.chid;
88
79 nv_parent(chan)->object_attach = nv04_fifo_object_attach; 89 nv_parent(chan)->object_attach = nv04_fifo_object_attach;
80 nv_parent(chan)->object_detach = nv04_fifo_object_detach; 90 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
81 nv_parent(chan)->context_attach = nv04_fifo_context_attach; 91 nv_parent(chan)->context_attach = nv04_fifo_context_attach;
82 chan->ramfc = chan->base.chid * 32; 92 chan->ramfc = chan->base.chid * 32;
83 93
84 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); 94 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
85 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); 95 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
86 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); 96 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
87 nv_wo32(priv->ramfc, chan->ramfc + 0x14, 97 nv_wo32(priv->ramfc, chan->ramfc + 0x14,
88 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 98 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -100,13 +110,15 @@ nv10_fifo_ofuncs = {
100 .dtor = nv04_fifo_chan_dtor, 110 .dtor = nv04_fifo_chan_dtor,
101 .init = nv04_fifo_chan_init, 111 .init = nv04_fifo_chan_init,
102 .fini = nv04_fifo_chan_fini, 112 .fini = nv04_fifo_chan_fini,
113 .map = _nouveau_fifo_channel_map,
103 .rd32 = _nouveau_fifo_channel_rd32, 114 .rd32 = _nouveau_fifo_channel_rd32,
104 .wr32 = _nouveau_fifo_channel_wr32, 115 .wr32 = _nouveau_fifo_channel_wr32,
116 .ntfy = _nouveau_fifo_channel_ntfy
105}; 117};
106 118
107static struct nouveau_oclass 119static struct nouveau_oclass
108nv10_fifo_sclass[] = { 120nv10_fifo_sclass[] = {
109 { NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs }, 121 { NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
110 {} 122 {}
111}; 123};
112 124
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index f25760209316..12d76c8adb23 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -22,8 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27#include <core/engctx.h> 28#include <core/engctx.h>
28#include <core/ramht.h> 29#include <core/ramht.h>
29 30
@@ -64,16 +65,23 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
64 struct nouveau_oclass *oclass, void *data, u32 size, 65 struct nouveau_oclass *oclass, void *data, u32 size,
65 struct nouveau_object **pobject) 66 struct nouveau_object **pobject)
66{ 67{
68 union {
69 struct nv03_channel_dma_v0 v0;
70 } *args = data;
67 struct nv04_fifo_priv *priv = (void *)engine; 71 struct nv04_fifo_priv *priv = (void *)engine;
68 struct nv04_fifo_chan *chan; 72 struct nv04_fifo_chan *chan;
69 struct nv03_channel_dma_class *args = data;
70 int ret; 73 int ret;
71 74
72 if (size < sizeof(*args)) 75 nv_ioctl(parent, "create channel dma size %d\n", size);
73 return -EINVAL; 76 if (nvif_unpack(args->v0, 0, 0, false)) {
77 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
78 "offset %016llx\n", args->v0.version,
79 args->v0.pushbuf, args->v0.offset);
80 } else
81 return ret;
74 82
75 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 83 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
76 0x10000, args->pushbuf, 84 0x10000, args->v0.pushbuf,
77 (1ULL << NVDEV_ENGINE_DMAOBJ) | 85 (1ULL << NVDEV_ENGINE_DMAOBJ) |
78 (1ULL << NVDEV_ENGINE_SW) | 86 (1ULL << NVDEV_ENGINE_SW) |
79 (1ULL << NVDEV_ENGINE_GR) | 87 (1ULL << NVDEV_ENGINE_GR) |
@@ -83,13 +91,15 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
83 if (ret) 91 if (ret)
84 return ret; 92 return ret;
85 93
94 args->v0.chid = chan->base.chid;
95
86 nv_parent(chan)->object_attach = nv04_fifo_object_attach; 96 nv_parent(chan)->object_attach = nv04_fifo_object_attach;
87 nv_parent(chan)->object_detach = nv04_fifo_object_detach; 97 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
88 nv_parent(chan)->context_attach = nv04_fifo_context_attach; 98 nv_parent(chan)->context_attach = nv04_fifo_context_attach;
89 chan->ramfc = chan->base.chid * 64; 99 chan->ramfc = chan->base.chid * 64;
90 100
91 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); 101 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
92 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); 102 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
93 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); 103 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
94 nv_wo32(priv->ramfc, chan->ramfc + 0x14, 104 nv_wo32(priv->ramfc, chan->ramfc + 0x14,
95 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 105 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -107,13 +117,15 @@ nv17_fifo_ofuncs = {
107 .dtor = nv04_fifo_chan_dtor, 117 .dtor = nv04_fifo_chan_dtor,
108 .init = nv04_fifo_chan_init, 118 .init = nv04_fifo_chan_init,
109 .fini = nv04_fifo_chan_fini, 119 .fini = nv04_fifo_chan_fini,
120 .map = _nouveau_fifo_channel_map,
110 .rd32 = _nouveau_fifo_channel_rd32, 121 .rd32 = _nouveau_fifo_channel_rd32,
111 .wr32 = _nouveau_fifo_channel_wr32, 122 .wr32 = _nouveau_fifo_channel_wr32,
123 .ntfy = _nouveau_fifo_channel_ntfy
112}; 124};
113 125
114static struct nouveau_oclass 126static struct nouveau_oclass
115nv17_fifo_sclass[] = { 127nv17_fifo_sclass[] = {
116 { NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs }, 128 { NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
117 {} 129 {}
118}; 130};
119 131
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 343487ed2238..9f49c3a24dc6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -22,8 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/client.h>
26#include <core/class.h> 26#include <nvif/unpack.h>
27#include <nvif/class.h>
27#include <core/engctx.h> 28#include <core/engctx.h>
28#include <core/ramht.h> 29#include <core/ramht.h>
29 30
@@ -182,16 +183,23 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
182 struct nouveau_oclass *oclass, void *data, u32 size, 183 struct nouveau_oclass *oclass, void *data, u32 size,
183 struct nouveau_object **pobject) 184 struct nouveau_object **pobject)
184{ 185{
186 union {
187 struct nv03_channel_dma_v0 v0;
188 } *args = data;
185 struct nv04_fifo_priv *priv = (void *)engine; 189 struct nv04_fifo_priv *priv = (void *)engine;
186 struct nv04_fifo_chan *chan; 190 struct nv04_fifo_chan *chan;
187 struct nv03_channel_dma_class *args = data;
188 int ret; 191 int ret;
189 192
190 if (size < sizeof(*args)) 193 nv_ioctl(parent, "create channel dma size %d\n", size);
191 return -EINVAL; 194 if (nvif_unpack(args->v0, 0, 0, false)) {
195 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
196 "offset %016llx\n", args->v0.version,
197 args->v0.pushbuf, args->v0.offset);
198 } else
199 return ret;
192 200
193 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 201 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
194 0x1000, args->pushbuf, 202 0x1000, args->v0.pushbuf,
195 (1ULL << NVDEV_ENGINE_DMAOBJ) | 203 (1ULL << NVDEV_ENGINE_DMAOBJ) |
196 (1ULL << NVDEV_ENGINE_SW) | 204 (1ULL << NVDEV_ENGINE_SW) |
197 (1ULL << NVDEV_ENGINE_GR) | 205 (1ULL << NVDEV_ENGINE_GR) |
@@ -200,14 +208,16 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
200 if (ret) 208 if (ret)
201 return ret; 209 return ret;
202 210
211 args->v0.chid = chan->base.chid;
212
203 nv_parent(chan)->context_attach = nv40_fifo_context_attach; 213 nv_parent(chan)->context_attach = nv40_fifo_context_attach;
204 nv_parent(chan)->context_detach = nv40_fifo_context_detach; 214 nv_parent(chan)->context_detach = nv40_fifo_context_detach;
205 nv_parent(chan)->object_attach = nv40_fifo_object_attach; 215 nv_parent(chan)->object_attach = nv40_fifo_object_attach;
206 nv_parent(chan)->object_detach = nv04_fifo_object_detach; 216 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
207 chan->ramfc = chan->base.chid * 128; 217 chan->ramfc = chan->base.chid * 128;
208 218
209 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); 219 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
210 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); 220 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
211 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); 221 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
212 nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 | 222 nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
213 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | 223 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -226,13 +236,15 @@ nv40_fifo_ofuncs = {
226 .dtor = nv04_fifo_chan_dtor, 236 .dtor = nv04_fifo_chan_dtor,
227 .init = nv04_fifo_chan_init, 237 .init = nv04_fifo_chan_init,
228 .fini = nv04_fifo_chan_fini, 238 .fini = nv04_fifo_chan_fini,
239 .map = _nouveau_fifo_channel_map,
229 .rd32 = _nouveau_fifo_channel_rd32, 240 .rd32 = _nouveau_fifo_channel_rd32,
230 .wr32 = _nouveau_fifo_channel_wr32, 241 .wr32 = _nouveau_fifo_channel_wr32,
242 .ntfy = _nouveau_fifo_channel_ntfy
231}; 243};
232 244
233static struct nouveau_oclass 245static struct nouveau_oclass
234nv40_fifo_sclass[] = { 246nv40_fifo_sclass[] = {
235 { NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs }, 247 { NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
236 {} 248 {}
237}; 249};
238 250
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index e6352bd5b4ff..5d1e86bc244c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -25,7 +25,8 @@
25#include <core/client.h> 25#include <core/client.h>
26#include <core/engctx.h> 26#include <core/engctx.h>
27#include <core/ramht.h> 27#include <core/ramht.h>
28#include <core/class.h> 28#include <nvif/unpack.h>
29#include <nvif/class.h>
29 30
30#include <subdev/timer.h> 31#include <subdev/timer.h>
31#include <subdev/bar.h> 32#include <subdev/bar.h>
@@ -194,17 +195,24 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
194 struct nouveau_oclass *oclass, void *data, u32 size, 195 struct nouveau_oclass *oclass, void *data, u32 size,
195 struct nouveau_object **pobject) 196 struct nouveau_object **pobject)
196{ 197{
198 union {
199 struct nv03_channel_dma_v0 v0;
200 } *args = data;
197 struct nouveau_bar *bar = nouveau_bar(parent); 201 struct nouveau_bar *bar = nouveau_bar(parent);
198 struct nv50_fifo_base *base = (void *)parent; 202 struct nv50_fifo_base *base = (void *)parent;
199 struct nv50_fifo_chan *chan; 203 struct nv50_fifo_chan *chan;
200 struct nv03_channel_dma_class *args = data;
201 int ret; 204 int ret;
202 205
203 if (size < sizeof(*args)) 206 nv_ioctl(parent, "create channel dma size %d\n", size);
204 return -EINVAL; 207 if (nvif_unpack(args->v0, 0, 0, false)) {
208 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
209 "offset %016llx\n", args->v0.version,
210 args->v0.pushbuf, args->v0.offset);
211 } else
212 return ret;
205 213
206 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 214 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
207 0x2000, args->pushbuf, 215 0x2000, args->v0.pushbuf,
208 (1ULL << NVDEV_ENGINE_DMAOBJ) | 216 (1ULL << NVDEV_ENGINE_DMAOBJ) |
209 (1ULL << NVDEV_ENGINE_SW) | 217 (1ULL << NVDEV_ENGINE_SW) |
210 (1ULL << NVDEV_ENGINE_GR) | 218 (1ULL << NVDEV_ENGINE_GR) |
@@ -213,6 +221,8 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
213 if (ret) 221 if (ret)
214 return ret; 222 return ret;
215 223
224 args->v0.chid = chan->base.chid;
225
216 nv_parent(chan)->context_attach = nv50_fifo_context_attach; 226 nv_parent(chan)->context_attach = nv50_fifo_context_attach;
217 nv_parent(chan)->context_detach = nv50_fifo_context_detach; 227 nv_parent(chan)->context_detach = nv50_fifo_context_detach;
218 nv_parent(chan)->object_attach = nv50_fifo_object_attach; 228 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
@@ -223,10 +233,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
223 if (ret) 233 if (ret)
224 return ret; 234 return ret;
225 235
226 nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset)); 236 nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
227 nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset)); 237 nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
228 nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset)); 238 nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
229 nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset)); 239 nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
230 nv_wo32(base->ramfc, 0x3c, 0x003f6078); 240 nv_wo32(base->ramfc, 0x3c, 0x003f6078);
231 nv_wo32(base->ramfc, 0x44, 0x01003fff); 241 nv_wo32(base->ramfc, 0x44, 0x01003fff);
232 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); 242 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
@@ -247,18 +257,26 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
247 struct nouveau_oclass *oclass, void *data, u32 size, 257 struct nouveau_oclass *oclass, void *data, u32 size,
248 struct nouveau_object **pobject) 258 struct nouveau_object **pobject)
249{ 259{
250 struct nv50_channel_ind_class *args = data; 260 union {
261 struct nv50_channel_gpfifo_v0 v0;
262 } *args = data;
251 struct nouveau_bar *bar = nouveau_bar(parent); 263 struct nouveau_bar *bar = nouveau_bar(parent);
252 struct nv50_fifo_base *base = (void *)parent; 264 struct nv50_fifo_base *base = (void *)parent;
253 struct nv50_fifo_chan *chan; 265 struct nv50_fifo_chan *chan;
254 u64 ioffset, ilength; 266 u64 ioffset, ilength;
255 int ret; 267 int ret;
256 268
257 if (size < sizeof(*args)) 269 nv_ioctl(parent, "create channel gpfifo size %d\n", size);
258 return -EINVAL; 270 if (nvif_unpack(args->v0, 0, 0, false)) {
271 nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
272 "ioffset %016llx ilength %08x\n",
273 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
274 args->v0.ilength);
275 } else
276 return ret;
259 277
260 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 278 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
261 0x2000, args->pushbuf, 279 0x2000, args->v0.pushbuf,
262 (1ULL << NVDEV_ENGINE_DMAOBJ) | 280 (1ULL << NVDEV_ENGINE_DMAOBJ) |
263 (1ULL << NVDEV_ENGINE_SW) | 281 (1ULL << NVDEV_ENGINE_SW) |
264 (1ULL << NVDEV_ENGINE_GR) | 282 (1ULL << NVDEV_ENGINE_GR) |
@@ -267,6 +285,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
267 if (ret) 285 if (ret)
268 return ret; 286 return ret;
269 287
288 args->v0.chid = chan->base.chid;
289
270 nv_parent(chan)->context_attach = nv50_fifo_context_attach; 290 nv_parent(chan)->context_attach = nv50_fifo_context_attach;
271 nv_parent(chan)->context_detach = nv50_fifo_context_detach; 291 nv_parent(chan)->context_detach = nv50_fifo_context_detach;
272 nv_parent(chan)->object_attach = nv50_fifo_object_attach; 292 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
@@ -277,8 +297,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
277 if (ret) 297 if (ret)
278 return ret; 298 return ret;
279 299
280 ioffset = args->ioffset; 300 ioffset = args->v0.ioffset;
281 ilength = order_base_2(args->ilength / 8); 301 ilength = order_base_2(args->v0.ilength / 8);
282 302
283 nv_wo32(base->ramfc, 0x3c, 0x403f6078); 303 nv_wo32(base->ramfc, 0x3c, 0x403f6078);
284 nv_wo32(base->ramfc, 0x44, 0x01003fff); 304 nv_wo32(base->ramfc, 0x44, 0x01003fff);
@@ -343,8 +363,10 @@ nv50_fifo_ofuncs_dma = {
343 .dtor = nv50_fifo_chan_dtor, 363 .dtor = nv50_fifo_chan_dtor,
344 .init = nv50_fifo_chan_init, 364 .init = nv50_fifo_chan_init,
345 .fini = nv50_fifo_chan_fini, 365 .fini = nv50_fifo_chan_fini,
366 .map = _nouveau_fifo_channel_map,
346 .rd32 = _nouveau_fifo_channel_rd32, 367 .rd32 = _nouveau_fifo_channel_rd32,
347 .wr32 = _nouveau_fifo_channel_wr32, 368 .wr32 = _nouveau_fifo_channel_wr32,
369 .ntfy = _nouveau_fifo_channel_ntfy
348}; 370};
349 371
350static struct nouveau_ofuncs 372static struct nouveau_ofuncs
@@ -353,14 +375,16 @@ nv50_fifo_ofuncs_ind = {
353 .dtor = nv50_fifo_chan_dtor, 375 .dtor = nv50_fifo_chan_dtor,
354 .init = nv50_fifo_chan_init, 376 .init = nv50_fifo_chan_init,
355 .fini = nv50_fifo_chan_fini, 377 .fini = nv50_fifo_chan_fini,
378 .map = _nouveau_fifo_channel_map,
356 .rd32 = _nouveau_fifo_channel_rd32, 379 .rd32 = _nouveau_fifo_channel_rd32,
357 .wr32 = _nouveau_fifo_channel_wr32, 380 .wr32 = _nouveau_fifo_channel_wr32,
381 .ntfy = _nouveau_fifo_channel_ntfy
358}; 382};
359 383
360static struct nouveau_oclass 384static struct nouveau_oclass
361nv50_fifo_sclass[] = { 385nv50_fifo_sclass[] = {
362 { NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma }, 386 { NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
363 { NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind }, 387 { NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
364 {} 388 {}
365}; 389};
366 390
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 6e5ac16e5460..1f42996b354a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -27,7 +27,8 @@
27#include <core/engctx.h> 27#include <core/engctx.h>
28#include <core/ramht.h> 28#include <core/ramht.h>
29#include <core/event.h> 29#include <core/event.h>
30#include <core/class.h> 30#include <nvif/unpack.h>
31#include <nvif/class.h>
31 32
32#include <subdev/timer.h> 33#include <subdev/timer.h>
33#include <subdev/bar.h> 34#include <subdev/bar.h>
@@ -160,17 +161,24 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
160 struct nouveau_oclass *oclass, void *data, u32 size, 161 struct nouveau_oclass *oclass, void *data, u32 size,
161 struct nouveau_object **pobject) 162 struct nouveau_object **pobject)
162{ 163{
164 union {
165 struct nv03_channel_dma_v0 v0;
166 } *args = data;
163 struct nouveau_bar *bar = nouveau_bar(parent); 167 struct nouveau_bar *bar = nouveau_bar(parent);
164 struct nv50_fifo_base *base = (void *)parent; 168 struct nv50_fifo_base *base = (void *)parent;
165 struct nv50_fifo_chan *chan; 169 struct nv50_fifo_chan *chan;
166 struct nv03_channel_dma_class *args = data;
167 int ret; 170 int ret;
168 171
169 if (size < sizeof(*args)) 172 nv_ioctl(parent, "create channel dma size %d\n", size);
170 return -EINVAL; 173 if (nvif_unpack(args->v0, 0, 0, false)) {
174 nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
175 "offset %016llx\n", args->v0.version,
176 args->v0.pushbuf, args->v0.offset);
177 } else
178 return ret;
171 179
172 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 180 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
173 0x2000, args->pushbuf, 181 0x2000, args->v0.pushbuf,
174 (1ULL << NVDEV_ENGINE_DMAOBJ) | 182 (1ULL << NVDEV_ENGINE_DMAOBJ) |
175 (1ULL << NVDEV_ENGINE_SW) | 183 (1ULL << NVDEV_ENGINE_SW) |
176 (1ULL << NVDEV_ENGINE_GR) | 184 (1ULL << NVDEV_ENGINE_GR) |
@@ -186,6 +194,8 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
186 if (ret) 194 if (ret)
187 return ret; 195 return ret;
188 196
197 args->v0.chid = chan->base.chid;
198
189 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, 199 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
190 &chan->ramht); 200 &chan->ramht);
191 if (ret) 201 if (ret)
@@ -196,10 +206,10 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
196 nv_parent(chan)->object_attach = nv84_fifo_object_attach; 206 nv_parent(chan)->object_attach = nv84_fifo_object_attach;
197 nv_parent(chan)->object_detach = nv50_fifo_object_detach; 207 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
198 208
199 nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset)); 209 nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
200 nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset)); 210 nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
201 nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset)); 211 nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
202 nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset)); 212 nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
203 nv_wo32(base->ramfc, 0x3c, 0x003f6078); 213 nv_wo32(base->ramfc, 0x3c, 0x003f6078);
204 nv_wo32(base->ramfc, 0x44, 0x01003fff); 214 nv_wo32(base->ramfc, 0x44, 0x01003fff);
205 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); 215 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
@@ -222,18 +232,26 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
222 struct nouveau_oclass *oclass, void *data, u32 size, 232 struct nouveau_oclass *oclass, void *data, u32 size,
223 struct nouveau_object **pobject) 233 struct nouveau_object **pobject)
224{ 234{
235 union {
236 struct nv50_channel_gpfifo_v0 v0;
237 } *args = data;
225 struct nouveau_bar *bar = nouveau_bar(parent); 238 struct nouveau_bar *bar = nouveau_bar(parent);
226 struct nv50_fifo_base *base = (void *)parent; 239 struct nv50_fifo_base *base = (void *)parent;
227 struct nv50_fifo_chan *chan; 240 struct nv50_fifo_chan *chan;
228 struct nv50_channel_ind_class *args = data;
229 u64 ioffset, ilength; 241 u64 ioffset, ilength;
230 int ret; 242 int ret;
231 243
232 if (size < sizeof(*args)) 244 nv_ioctl(parent, "create channel gpfifo size %d\n", size);
233 return -EINVAL; 245 if (nvif_unpack(args->v0, 0, 0, false)) {
246 nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
247 "ioffset %016llx ilength %08x\n",
248 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
249 args->v0.ilength);
250 } else
251 return ret;
234 252
235 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 253 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
236 0x2000, args->pushbuf, 254 0x2000, args->v0.pushbuf,
237 (1ULL << NVDEV_ENGINE_DMAOBJ) | 255 (1ULL << NVDEV_ENGINE_DMAOBJ) |
238 (1ULL << NVDEV_ENGINE_SW) | 256 (1ULL << NVDEV_ENGINE_SW) |
239 (1ULL << NVDEV_ENGINE_GR) | 257 (1ULL << NVDEV_ENGINE_GR) |
@@ -249,6 +267,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
249 if (ret) 267 if (ret)
250 return ret; 268 return ret;
251 269
270 args->v0.chid = chan->base.chid;
271
252 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, 272 ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
253 &chan->ramht); 273 &chan->ramht);
254 if (ret) 274 if (ret)
@@ -259,8 +279,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
259 nv_parent(chan)->object_attach = nv84_fifo_object_attach; 279 nv_parent(chan)->object_attach = nv84_fifo_object_attach;
260 nv_parent(chan)->object_detach = nv50_fifo_object_detach; 280 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
261 281
262 ioffset = args->ioffset; 282 ioffset = args->v0.ioffset;
263 ilength = order_base_2(args->ilength / 8); 283 ilength = order_base_2(args->v0.ilength / 8);
264 284
265 nv_wo32(base->ramfc, 0x3c, 0x403f6078); 285 nv_wo32(base->ramfc, 0x3c, 0x403f6078);
266 nv_wo32(base->ramfc, 0x44, 0x01003fff); 286 nv_wo32(base->ramfc, 0x44, 0x01003fff);
@@ -304,8 +324,10 @@ nv84_fifo_ofuncs_dma = {
304 .dtor = nv50_fifo_chan_dtor, 324 .dtor = nv50_fifo_chan_dtor,
305 .init = nv84_fifo_chan_init, 325 .init = nv84_fifo_chan_init,
306 .fini = nv50_fifo_chan_fini, 326 .fini = nv50_fifo_chan_fini,
327 .map = _nouveau_fifo_channel_map,
307 .rd32 = _nouveau_fifo_channel_rd32, 328 .rd32 = _nouveau_fifo_channel_rd32,
308 .wr32 = _nouveau_fifo_channel_wr32, 329 .wr32 = _nouveau_fifo_channel_wr32,
330 .ntfy = _nouveau_fifo_channel_ntfy
309}; 331};
310 332
311static struct nouveau_ofuncs 333static struct nouveau_ofuncs
@@ -314,14 +336,16 @@ nv84_fifo_ofuncs_ind = {
314 .dtor = nv50_fifo_chan_dtor, 336 .dtor = nv50_fifo_chan_dtor,
315 .init = nv84_fifo_chan_init, 337 .init = nv84_fifo_chan_init,
316 .fini = nv50_fifo_chan_fini, 338 .fini = nv50_fifo_chan_fini,
339 .map = _nouveau_fifo_channel_map,
317 .rd32 = _nouveau_fifo_channel_rd32, 340 .rd32 = _nouveau_fifo_channel_rd32,
318 .wr32 = _nouveau_fifo_channel_wr32, 341 .wr32 = _nouveau_fifo_channel_wr32,
342 .ntfy = _nouveau_fifo_channel_ntfy
319}; 343};
320 344
321static struct nouveau_oclass 345static struct nouveau_oclass
322nv84_fifo_sclass[] = { 346nv84_fifo_sclass[] = {
323 { NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma }, 347 { G82_CHANNEL_DMA, &nv84_fifo_ofuncs_dma },
324 { NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind }, 348 { G82_CHANNEL_GPFIFO, &nv84_fifo_ofuncs_ind },
325 {} 349 {}
326}; 350};
327 351
@@ -389,19 +413,26 @@ nv84_fifo_cclass = {
389 ******************************************************************************/ 413 ******************************************************************************/
390 414
391static void 415static void
392nv84_fifo_uevent_enable(struct nouveau_event *event, int type, int index) 416nv84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
393{ 417{
394 struct nv84_fifo_priv *priv = event->priv; 418 struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
395 nv_mask(priv, 0x002140, 0x40000000, 0x40000000); 419 nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
396} 420}
397 421
398static void 422static void
399nv84_fifo_uevent_disable(struct nouveau_event *event, int type, int index) 423nv84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
400{ 424{
401 struct nv84_fifo_priv *priv = event->priv; 425 struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
402 nv_mask(priv, 0x002140, 0x40000000, 0x00000000); 426 nv_mask(fifo, 0x002140, 0x40000000, 0x00000000);
403} 427}
404 428
429static const struct nvkm_event_func
430nv84_fifo_uevent_func = {
431 .ctor = nouveau_fifo_uevent_ctor,
432 .init = nv84_fifo_uevent_init,
433 .fini = nv84_fifo_uevent_fini,
434};
435
405static int 436static int
406nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 437nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
407 struct nouveau_oclass *oclass, void *data, u32 size, 438 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -425,9 +456,9 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
425 if (ret) 456 if (ret)
426 return ret; 457 return ret;
427 458
428 priv->base.uevent->enable = nv84_fifo_uevent_enable; 459 ret = nvkm_event_init(&nv84_fifo_uevent_func, 1, 1, &priv->base.uevent);
429 priv->base.uevent->disable = nv84_fifo_uevent_disable; 460 if (ret)
430 priv->base.uevent->priv = priv; 461 return ret;
431 462
432 nv_subdev(priv)->unit = 0x00000100; 463 nv_subdev(priv)->unit = 0x00000100;
433 nv_subdev(priv)->intr = nv04_fifo_intr; 464 nv_subdev(priv)->intr = nv04_fifo_intr;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index ae4a4dc5642a..1fe1f8fbda0c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -28,7 +28,8 @@
28#include <core/gpuobj.h> 28#include <core/gpuobj.h>
29#include <core/engctx.h> 29#include <core/engctx.h>
30#include <core/event.h> 30#include <core/event.h>
31#include <core/class.h> 31#include <nvif/unpack.h>
32#include <nvif/class.h>
32#include <core/enum.h> 33#include <core/enum.h>
33 34
34#include <subdev/timer.h> 35#include <subdev/timer.h>
@@ -187,20 +188,28 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
187 struct nouveau_oclass *oclass, void *data, u32 size, 188 struct nouveau_oclass *oclass, void *data, u32 size,
188 struct nouveau_object **pobject) 189 struct nouveau_object **pobject)
189{ 190{
191 union {
192 struct nv50_channel_gpfifo_v0 v0;
193 } *args = data;
190 struct nouveau_bar *bar = nouveau_bar(parent); 194 struct nouveau_bar *bar = nouveau_bar(parent);
191 struct nvc0_fifo_priv *priv = (void *)engine; 195 struct nvc0_fifo_priv *priv = (void *)engine;
192 struct nvc0_fifo_base *base = (void *)parent; 196 struct nvc0_fifo_base *base = (void *)parent;
193 struct nvc0_fifo_chan *chan; 197 struct nvc0_fifo_chan *chan;
194 struct nv50_channel_ind_class *args = data;
195 u64 usermem, ioffset, ilength; 198 u64 usermem, ioffset, ilength;
196 int ret, i; 199 int ret, i;
197 200
198 if (size < sizeof(*args)) 201 nv_ioctl(parent, "create channel gpfifo size %d\n", size);
199 return -EINVAL; 202 if (nvif_unpack(args->v0, 0, 0, false)) {
203 nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
204 "ioffset %016llx ilength %08x\n",
205 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
206 args->v0.ilength);
207 } else
208 return ret;
200 209
201 ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, 210 ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
202 priv->user.bar.offset, 0x1000, 211 priv->user.bar.offset, 0x1000,
203 args->pushbuf, 212 args->v0.pushbuf,
204 (1ULL << NVDEV_ENGINE_SW) | 213 (1ULL << NVDEV_ENGINE_SW) |
205 (1ULL << NVDEV_ENGINE_GR) | 214 (1ULL << NVDEV_ENGINE_GR) |
206 (1ULL << NVDEV_ENGINE_COPY0) | 215 (1ULL << NVDEV_ENGINE_COPY0) |
@@ -212,12 +221,14 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
212 if (ret) 221 if (ret)
213 return ret; 222 return ret;
214 223
224 args->v0.chid = chan->base.chid;
225
215 nv_parent(chan)->context_attach = nvc0_fifo_context_attach; 226 nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
216 nv_parent(chan)->context_detach = nvc0_fifo_context_detach; 227 nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
217 228
218 usermem = chan->base.chid * 0x1000; 229 usermem = chan->base.chid * 0x1000;
219 ioffset = args->ioffset; 230 ioffset = args->v0.ioffset;
220 ilength = order_base_2(args->ilength / 8); 231 ilength = order_base_2(args->v0.ilength / 8);
221 232
222 for (i = 0; i < 0x1000; i += 4) 233 for (i = 0; i < 0x1000; i += 4)
223 nv_wo32(priv->user.mem, usermem + i, 0x00000000); 234 nv_wo32(priv->user.mem, usermem + i, 0x00000000);
@@ -291,13 +302,15 @@ nvc0_fifo_ofuncs = {
291 .dtor = _nouveau_fifo_channel_dtor, 302 .dtor = _nouveau_fifo_channel_dtor,
292 .init = nvc0_fifo_chan_init, 303 .init = nvc0_fifo_chan_init,
293 .fini = nvc0_fifo_chan_fini, 304 .fini = nvc0_fifo_chan_fini,
305 .map = _nouveau_fifo_channel_map,
294 .rd32 = _nouveau_fifo_channel_rd32, 306 .rd32 = _nouveau_fifo_channel_rd32,
295 .wr32 = _nouveau_fifo_channel_wr32, 307 .wr32 = _nouveau_fifo_channel_wr32,
308 .ntfy = _nouveau_fifo_channel_ntfy
296}; 309};
297 310
298static struct nouveau_oclass 311static struct nouveau_oclass
299nvc0_fifo_sclass[] = { 312nvc0_fifo_sclass[] = {
300 { NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs }, 313 { FERMI_CHANNEL_GPFIFO, &nvc0_fifo_ofuncs },
301 {} 314 {}
302}; 315};
303 316
@@ -654,7 +667,7 @@ nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
654 object = engctx; 667 object = engctx;
655 while (object) { 668 while (object) {
656 switch (nv_mclass(object)) { 669 switch (nv_mclass(object)) {
657 case NVC0_CHANNEL_IND_CLASS: 670 case FERMI_CHANNEL_GPFIFO:
658 nvc0_fifo_recover(priv, engine, (void *)object); 671 nvc0_fifo_recover(priv, engine, (void *)object);
659 break; 672 break;
660 } 673 }
@@ -730,7 +743,7 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
730 for (unkn = 0; unkn < 8; unkn++) { 743 for (unkn = 0; unkn < 8; unkn++) {
731 u32 ints = (intr >> (unkn * 0x04)) & inte; 744 u32 ints = (intr >> (unkn * 0x04)) & inte;
732 if (ints & 0x1) { 745 if (ints & 0x1) {
733 nouveau_event_trigger(priv->base.uevent, 1, 0); 746 nouveau_fifo_uevent(&priv->base);
734 ints &= ~1; 747 ints &= ~1;
735 } 748 }
736 if (ints) { 749 if (ints) {
@@ -827,19 +840,26 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
827} 840}
828 841
829static void 842static void
830nvc0_fifo_uevent_enable(struct nouveau_event *event, int type, int index) 843nvc0_fifo_uevent_init(struct nvkm_event *event, int type, int index)
831{ 844{
832 struct nvc0_fifo_priv *priv = event->priv; 845 struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
833 nv_mask(priv, 0x002140, 0x80000000, 0x80000000); 846 nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
834} 847}
835 848
836static void 849static void
837nvc0_fifo_uevent_disable(struct nouveau_event *event, int type, int index) 850nvc0_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
838{ 851{
839 struct nvc0_fifo_priv *priv = event->priv; 852 struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
840 nv_mask(priv, 0x002140, 0x80000000, 0x00000000); 853 nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
841} 854}
842 855
856static const struct nvkm_event_func
857nvc0_fifo_uevent_func = {
858 .ctor = nouveau_fifo_uevent_ctor,
859 .init = nvc0_fifo_uevent_init,
860 .fini = nvc0_fifo_uevent_fini,
861};
862
843static int 863static int
844nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 864nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
845 struct nouveau_oclass *oclass, void *data, u32 size, 865 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -877,9 +897,9 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
877 if (ret) 897 if (ret)
878 return ret; 898 return ret;
879 899
880 priv->base.uevent->enable = nvc0_fifo_uevent_enable; 900 ret = nvkm_event_init(&nvc0_fifo_uevent_func, 1, 1, &priv->base.uevent);
881 priv->base.uevent->disable = nvc0_fifo_uevent_disable; 901 if (ret)
882 priv->base.uevent->priv = priv; 902 return ret;
883 903
884 nv_subdev(priv)->unit = 0x00000100; 904 nv_subdev(priv)->unit = 0x00000100;
885 nv_subdev(priv)->intr = nvc0_fifo_intr; 905 nv_subdev(priv)->intr = nvc0_fifo_intr;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 298063edb92d..d2f0fd39c145 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -28,7 +28,8 @@
28#include <core/gpuobj.h> 28#include <core/gpuobj.h>
29#include <core/engctx.h> 29#include <core/engctx.h>
30#include <core/event.h> 30#include <core/event.h>
31#include <core/class.h> 31#include <nvif/unpack.h>
32#include <nvif/class.h>
32#include <core/enum.h> 33#include <core/enum.h>
33 34
34#include <subdev/timer.h> 35#include <subdev/timer.h>
@@ -216,46 +217,56 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent,
216 struct nouveau_oclass *oclass, void *data, u32 size, 217 struct nouveau_oclass *oclass, void *data, u32 size,
217 struct nouveau_object **pobject) 218 struct nouveau_object **pobject)
218{ 219{
220 union {
221 struct kepler_channel_gpfifo_a_v0 v0;
222 } *args = data;
219 struct nouveau_bar *bar = nouveau_bar(parent); 223 struct nouveau_bar *bar = nouveau_bar(parent);
220 struct nve0_fifo_priv *priv = (void *)engine; 224 struct nve0_fifo_priv *priv = (void *)engine;
221 struct nve0_fifo_base *base = (void *)parent; 225 struct nve0_fifo_base *base = (void *)parent;
222 struct nve0_fifo_chan *chan; 226 struct nve0_fifo_chan *chan;
223 struct nve0_channel_ind_class *args = data;
224 u64 usermem, ioffset, ilength; 227 u64 usermem, ioffset, ilength;
225 int ret, i; 228 int ret, i;
226 229
227 if (size < sizeof(*args)) 230 nv_ioctl(parent, "create channel gpfifo size %d\n", size);
228 return -EINVAL; 231 if (nvif_unpack(args->v0, 0, 0, false)) {
232 nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
233 "ioffset %016llx ilength %08x engine %08x\n",
234 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
235 args->v0.ilength, args->v0.engine);
236 } else
237 return ret;
229 238
230 for (i = 0; i < FIFO_ENGINE_NR; i++) { 239 for (i = 0; i < FIFO_ENGINE_NR; i++) {
231 if (args->engine & (1 << i)) { 240 if (args->v0.engine & (1 << i)) {
232 if (nouveau_engine(parent, fifo_engine[i].subdev)) { 241 if (nouveau_engine(parent, fifo_engine[i].subdev)) {
233 args->engine = (1 << i); 242 args->v0.engine = (1 << i);
234 break; 243 break;
235 } 244 }
236 } 245 }
237 } 246 }
238 247
239 if (i == FIFO_ENGINE_NR) { 248 if (i == FIFO_ENGINE_NR) {
240 nv_error(priv, "unsupported engines 0x%08x\n", args->engine); 249 nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine);
241 return -ENODEV; 250 return -ENODEV;
242 } 251 }
243 252
244 ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, 253 ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
245 priv->user.bar.offset, 0x200, 254 priv->user.bar.offset, 0x200,
246 args->pushbuf, 255 args->v0.pushbuf,
247 fifo_engine[i].mask, &chan); 256 fifo_engine[i].mask, &chan);
248 *pobject = nv_object(chan); 257 *pobject = nv_object(chan);
249 if (ret) 258 if (ret)
250 return ret; 259 return ret;
251 260
261 args->v0.chid = chan->base.chid;
262
252 nv_parent(chan)->context_attach = nve0_fifo_context_attach; 263 nv_parent(chan)->context_attach = nve0_fifo_context_attach;
253 nv_parent(chan)->context_detach = nve0_fifo_context_detach; 264 nv_parent(chan)->context_detach = nve0_fifo_context_detach;
254 chan->engine = i; 265 chan->engine = i;
255 266
256 usermem = chan->base.chid * 0x200; 267 usermem = chan->base.chid * 0x200;
257 ioffset = args->ioffset; 268 ioffset = args->v0.ioffset;
258 ilength = order_base_2(args->ilength / 8); 269 ilength = order_base_2(args->v0.ilength / 8);
259 270
260 for (i = 0; i < 0x200; i += 4) 271 for (i = 0; i < 0x200; i += 4)
261 nv_wo32(priv->user.mem, usermem + i, 0x00000000); 272 nv_wo32(priv->user.mem, usermem + i, 0x00000000);
@@ -325,13 +336,15 @@ nve0_fifo_ofuncs = {
325 .dtor = _nouveau_fifo_channel_dtor, 336 .dtor = _nouveau_fifo_channel_dtor,
326 .init = nve0_fifo_chan_init, 337 .init = nve0_fifo_chan_init,
327 .fini = nve0_fifo_chan_fini, 338 .fini = nve0_fifo_chan_fini,
339 .map = _nouveau_fifo_channel_map,
328 .rd32 = _nouveau_fifo_channel_rd32, 340 .rd32 = _nouveau_fifo_channel_rd32,
329 .wr32 = _nouveau_fifo_channel_wr32, 341 .wr32 = _nouveau_fifo_channel_wr32,
342 .ntfy = _nouveau_fifo_channel_ntfy
330}; 343};
331 344
332static struct nouveau_oclass 345static struct nouveau_oclass
333nve0_fifo_sclass[] = { 346nve0_fifo_sclass[] = {
334 { NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs }, 347 { KEPLER_CHANNEL_GPFIFO_A, &nve0_fifo_ofuncs },
335 {} 348 {}
336}; 349};
337 350
@@ -769,7 +782,7 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
769 object = engctx; 782 object = engctx;
770 while (object) { 783 while (object) {
771 switch (nv_mclass(object)) { 784 switch (nv_mclass(object)) {
772 case NVE0_CHANNEL_IND_CLASS: 785 case KEPLER_CHANNEL_GPFIFO_A:
773 nve0_fifo_recover(priv, engine, (void *)object); 786 nve0_fifo_recover(priv, engine, (void *)object);
774 break; 787 break;
775 } 788 }
@@ -859,7 +872,7 @@ nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
859static void 872static void
860nve0_fifo_intr_engine(struct nve0_fifo_priv *priv) 873nve0_fifo_intr_engine(struct nve0_fifo_priv *priv)
861{ 874{
862 nouveau_event_trigger(priv->base.uevent, 1, 0); 875 nouveau_fifo_uevent(&priv->base);
863} 876}
864 877
865static void 878static void
@@ -952,19 +965,26 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
952} 965}
953 966
954static void 967static void
955nve0_fifo_uevent_enable(struct nouveau_event *event, int type, int index) 968nve0_fifo_uevent_init(struct nvkm_event *event, int type, int index)
956{ 969{
957 struct nve0_fifo_priv *priv = event->priv; 970 struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
958 nv_mask(priv, 0x002140, 0x80000000, 0x80000000); 971 nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
959} 972}
960 973
961static void 974static void
962nve0_fifo_uevent_disable(struct nouveau_event *event, int type, int index) 975nve0_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
963{ 976{
964 struct nve0_fifo_priv *priv = event->priv; 977 struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
965 nv_mask(priv, 0x002140, 0x80000000, 0x00000000); 978 nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
966} 979}
967 980
981static const struct nvkm_event_func
982nve0_fifo_uevent_func = {
983 .ctor = nouveau_fifo_uevent_ctor,
984 .init = nve0_fifo_uevent_init,
985 .fini = nve0_fifo_uevent_fini,
986};
987
968int 988int
969nve0_fifo_fini(struct nouveau_object *object, bool suspend) 989nve0_fifo_fini(struct nouveau_object *object, bool suspend)
970{ 990{
@@ -1067,9 +1087,9 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1067 if (ret) 1087 if (ret)
1068 return ret; 1088 return ret;
1069 1089
1070 priv->base.uevent->enable = nve0_fifo_uevent_enable; 1090 ret = nvkm_event_init(&nve0_fifo_uevent_func, 1, 1, &priv->base.uevent);
1071 priv->base.uevent->disable = nve0_fifo_uevent_disable; 1091 if (ret)
1072 priv->base.uevent->priv = priv; 1092 return ret;
1073 1093
1074 nv_subdev(priv)->unit = 0x00000100; 1094 nv_subdev(priv)->unit = 0x00000100;
1075 nv_subdev(priv)->intr = nve0_fifo_intr; 1095 nv_subdev(priv)->intr = nve0_fifo_intr;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c
new file mode 100644
index 000000000000..3adb7fe91772
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "ctxnvc0.h"
26
27/*******************************************************************************
28 * PGRAPH context register lists
29 ******************************************************************************/
30
31static const struct nvc0_graph_init
32gk110b_grctx_init_sm_0[] = {
33 { 0x419e04, 1, 0x04, 0x00000000 },
34 { 0x419e08, 1, 0x04, 0x0000001d },
35 { 0x419e0c, 1, 0x04, 0x00000000 },
36 { 0x419e10, 1, 0x04, 0x00001c02 },
37 { 0x419e44, 1, 0x04, 0x0013eff2 },
38 { 0x419e48, 1, 0x04, 0x00000000 },
39 { 0x419e4c, 1, 0x04, 0x0000007f },
40 { 0x419e50, 2, 0x04, 0x00000000 },
41 { 0x419e58, 1, 0x04, 0x00000001 },
42 { 0x419e5c, 3, 0x04, 0x00000000 },
43 { 0x419e68, 1, 0x04, 0x00000002 },
44 { 0x419e6c, 12, 0x04, 0x00000000 },
45 { 0x419eac, 1, 0x04, 0x00001f8f },
46 { 0x419eb0, 1, 0x04, 0x0db00d2f },
47 { 0x419eb8, 1, 0x04, 0x00000000 },
48 { 0x419ec8, 1, 0x04, 0x0001304f },
49 { 0x419f30, 4, 0x04, 0x00000000 },
50 { 0x419f40, 1, 0x04, 0x00000018 },
51 { 0x419f44, 3, 0x04, 0x00000000 },
52 { 0x419f58, 1, 0x04, 0x00000000 },
53 { 0x419f70, 1, 0x04, 0x00006300 },
54 { 0x419f78, 1, 0x04, 0x000000eb },
55 { 0x419f7c, 1, 0x04, 0x00000404 },
56 {}
57};
58
59static const struct nvc0_graph_pack
60gk110b_grctx_pack_tpc[] = {
61 { nvd7_grctx_init_pe_0 },
62 { nvf0_grctx_init_tex_0 },
63 { nvf0_grctx_init_mpc_0 },
64 { nvf0_grctx_init_l1c_0 },
65 { gk110b_grctx_init_sm_0 },
66 {}
67};
68
69/*******************************************************************************
70 * PGRAPH context implementation
71 ******************************************************************************/
72
73struct nouveau_oclass *
74gk110b_grctx_oclass = &(struct nvc0_grctx_oclass) {
75 .base.handle = NV_ENGCTX(GR, 0xf1),
76 .base.ofuncs = &(struct nouveau_ofuncs) {
77 .ctor = nvc0_graph_context_ctor,
78 .dtor = nvc0_graph_context_dtor,
79 .init = _nouveau_graph_context_init,
80 .fini = _nouveau_graph_context_fini,
81 .rd32 = _nouveau_graph_context_rd32,
82 .wr32 = _nouveau_graph_context_wr32,
83 },
84 .main = nve4_grctx_generate_main,
85 .unkn = nve4_grctx_generate_unkn,
86 .hub = nvf0_grctx_pack_hub,
87 .gpc = nvf0_grctx_pack_gpc,
88 .zcull = nvc0_grctx_pack_zcull,
89 .tpc = gk110b_grctx_pack_tpc,
90 .ppc = nvf0_grctx_pack_ppc,
91 .icmd = nvf0_grctx_pack_icmd,
92 .mthd = nvf0_grctx_pack_mthd,
93 .bundle = nve4_grctx_generate_bundle,
94 .bundle_size = 0x3000,
95 .bundle_min_gpm_fifo_depth = 0x180,
96 .bundle_token_limit = 0x600,
97 .pagepool = nve4_grctx_generate_pagepool,
98 .pagepool_size = 0x8000,
99 .attrib = nvd7_grctx_generate_attrib,
100 .attrib_nr_max = 0x324,
101 .attrib_nr = 0x218,
102 .alpha_nr_max = 0x7ff,
103 .alpha_nr = 0x648,
104}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
index 224ee0287ab7..36fc9831cc93 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
@@ -41,7 +41,6 @@ gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) {
41 .wr32 = _nouveau_graph_context_wr32, 41 .wr32 = _nouveau_graph_context_wr32,
42 }, 42 },
43 .main = nve4_grctx_generate_main, 43 .main = nve4_grctx_generate_main,
44 .mods = nve4_grctx_generate_mods,
45 .unkn = nve4_grctx_generate_unkn, 44 .unkn = nve4_grctx_generate_unkn,
46 .hub = nve4_grctx_pack_hub, 45 .hub = nve4_grctx_pack_hub,
47 .gpc = nve4_grctx_pack_gpc, 46 .gpc = nve4_grctx_pack_gpc,
@@ -50,4 +49,15 @@ gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) {
50 .ppc = nve4_grctx_pack_ppc, 49 .ppc = nve4_grctx_pack_ppc,
51 .icmd = nve4_grctx_pack_icmd, 50 .icmd = nve4_grctx_pack_icmd,
52 .mthd = gk20a_grctx_pack_mthd, 51 .mthd = gk20a_grctx_pack_mthd,
52 .bundle = nve4_grctx_generate_bundle,
53 .bundle_size = 0x1800,
54 .bundle_min_gpm_fifo_depth = 0x62,
55 .bundle_token_limit = 0x100,
56 .pagepool = nve4_grctx_generate_pagepool,
57 .pagepool_size = 0x8000,
58 .attrib = nvd7_grctx_generate_attrib,
59 .attrib_nr_max = 0x240,
60 .attrib_nr = 0x240,
61 .alpha_nr_max = 0x648 + (0x648 / 2),
62 .alpha_nr = 0x648,
53}.base; 63}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
index b0d0fb2f4d08..62e918b9fa81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
@@ -859,45 +859,74 @@ gm107_grctx_pack_ppc[] = {
859 ******************************************************************************/ 859 ******************************************************************************/
860 860
861static void 861static void
862gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 862gm107_grctx_generate_bundle(struct nvc0_grctx *info)
863{ 863{
864 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 864 const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
865 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 865 const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
866 mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW); 866 impl->bundle_size / 0x20);
867 867 const u32 token_limit = impl->bundle_token_limit;
868 mmio_list(0x40800c, 0x00000000, 8, 1); 868 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
869 mmio_list(0x408010, 0x80000000, 0, 0); 869 const int s = 8;
870 mmio_list(0x419004, 0x00000000, 8, 1); 870 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
871 mmio_list(0x419008, 0x00000000, 0, 0); 871 mmio_refn(info, 0x408004, 0x00000000, s, b);
872 mmio_list(0x4064cc, 0x80000000, 0, 0); 872 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
873 mmio_list(0x418e30, 0x80000000, 0, 0); 873 mmio_refn(info, 0x418e24, 0x00000000, s, b);
874 874 mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b);
875 mmio_list(0x408004, 0x00000000, 8, 0); 875 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
876 mmio_list(0x408008, 0x80000030, 0, 0); 876}
877 mmio_list(0x418e24, 0x00000000, 8, 0); 877
878 mmio_list(0x418e28, 0x80000030, 0, 0); 878static void
879 879gm107_grctx_generate_pagepool(struct nvc0_grctx *info)
880 mmio_list(0x4064c8, 0x018002c0, 0, 0); 880{
881 881 const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
882 mmio_list(0x418810, 0x80000000, 12, 2); 882 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
883 mmio_list(0x419848, 0x10000000, 12, 2); 883 const int s = 8;
884 mmio_list(0x419c2c, 0x10000000, 12, 2); 884 const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
885 885 mmio_refn(info, 0x40800c, 0x00000000, s, b);
886 mmio_list(0x405830, 0x0aa01000, 0, 0); 886 mmio_wr32(info, 0x408010, 0x80000000);
887 mmio_list(0x4064c4, 0x0400ffff, 0, 0); 887 mmio_refn(info, 0x419004, 0x00000000, s, b);
888 888 mmio_wr32(info, 0x419008, 0x00000000);
889 /*XXX*/ 889 mmio_wr32(info, 0x4064cc, 0x80000000);
890 mmio_list(0x5030c0, 0x00001540, 0, 0); 890 mmio_wr32(info, 0x418e30, 0x80000000); /* guess at it being related */
891 mmio_list(0x5030f4, 0x00000000, 0, 0); 891}
892 mmio_list(0x5030e4, 0x00002000, 0, 0); 892
893 mmio_list(0x5030f8, 0x00003fc0, 0, 0); 893static void
894 mmio_list(0x418ea0, 0x07151540, 0, 0); 894gm107_grctx_generate_attrib(struct nvc0_grctx *info)
895 895{
896 mmio_list(0x5032c0, 0x00001540, 0, 0); 896 struct nvc0_graph_priv *priv = info->priv;
897 mmio_list(0x5032f4, 0x00001fe0, 0, 0); 897 const struct nvc0_grctx_oclass *impl = (void *)nvc0_grctx_impl(priv);
898 mmio_list(0x5032e4, 0x00002000, 0, 0); 898 const u32 alpha = impl->alpha_nr;
899 mmio_list(0x5032f8, 0x00006fc0, 0, 0); 899 const u32 attrib = impl->attrib_nr;
900 mmio_list(0x418ea4, 0x07151540, 0, 0); 900 const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
901 const u32 access = NV_MEM_ACCESS_RW;
902 const int s = 12;
903 const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
904 const int max_batches = 0xffff;
905 u32 bo = 0;
906 u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
907 int gpc, ppc, n = 0;
908
909 mmio_refn(info, 0x418810, 0x80000000, s, b);
910 mmio_refn(info, 0x419848, 0x10000000, s, b);
911 mmio_refn(info, 0x419c2c, 0x10000000, s, b);
912 mmio_wr32(info, 0x405830, (attrib << 16) | alpha);
913 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
914
915 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
916 for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++, n++) {
917 const u32 as = alpha * priv->ppc_tpc_nr[gpc][ppc];
918 const u32 bs = attrib * priv->ppc_tpc_nr[gpc][ppc];
919 const u32 u = 0x418ea0 + (n * 0x04);
920 const u32 o = PPC_UNIT(gpc, ppc, 0);
921 mmio_wr32(info, o + 0xc0, bs);
922 mmio_wr32(info, o + 0xf4, bo);
923 bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
924 mmio_wr32(info, o + 0xe4, as);
925 mmio_wr32(info, o + 0xf8, ao);
926 ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
927 mmio_wr32(info, u, (0x715 /*XXX*/ << 16) | bs);
928 }
929 }
901} 930}
902 931
903static void 932static void
@@ -934,7 +963,9 @@ gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
934 963
935 nv_wr32(priv, 0x404154, 0x00000000); 964 nv_wr32(priv, 0x404154, 0x00000000);
936 965
937 oclass->mods(priv, info); 966 oclass->bundle(info);
967 oclass->pagepool(info);
968 oclass->attrib(info);
938 oclass->unkn(priv); 969 oclass->unkn(priv);
939 970
940 gm107_grctx_generate_tpcid(priv); 971 gm107_grctx_generate_tpcid(priv);
@@ -979,7 +1010,6 @@ gm107_grctx_oclass = &(struct nvc0_grctx_oclass) {
979 .wr32 = _nouveau_graph_context_wr32, 1010 .wr32 = _nouveau_graph_context_wr32,
980 }, 1011 },
981 .main = gm107_grctx_generate_main, 1012 .main = gm107_grctx_generate_main,
982 .mods = gm107_grctx_generate_mods,
983 .unkn = nve4_grctx_generate_unkn, 1013 .unkn = nve4_grctx_generate_unkn,
984 .hub = gm107_grctx_pack_hub, 1014 .hub = gm107_grctx_pack_hub,
985 .gpc = gm107_grctx_pack_gpc, 1015 .gpc = gm107_grctx_pack_gpc,
@@ -988,4 +1018,15 @@ gm107_grctx_oclass = &(struct nvc0_grctx_oclass) {
988 .ppc = gm107_grctx_pack_ppc, 1018 .ppc = gm107_grctx_pack_ppc,
989 .icmd = gm107_grctx_pack_icmd, 1019 .icmd = gm107_grctx_pack_icmd,
990 .mthd = gm107_grctx_pack_mthd, 1020 .mthd = gm107_grctx_pack_mthd,
1021 .bundle = gm107_grctx_generate_bundle,
1022 .bundle_size = 0x3000,
1023 .bundle_min_gpm_fifo_depth = 0x180,
1024 .bundle_token_limit = 0x2c0,
1025 .pagepool = gm107_grctx_generate_pagepool,
1026 .pagepool_size = 0x8000,
1027 .attrib = gm107_grctx_generate_attrib,
1028 .attrib_nr_max = 0xff0,
1029 .attrib_nr = 0xaa0,
1030 .alpha_nr_max = 0x1800,
1031 .alpha_nr = 0x1000,
991}.base; 1032}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
index 8de4a4291548..ce252adbef81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
@@ -531,50 +531,6 @@ nv108_grctx_pack_ppc[] = {
531 * PGRAPH context implementation 531 * PGRAPH context implementation
532 ******************************************************************************/ 532 ******************************************************************************/
533 533
534static void
535nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
536{
537 u32 magic[GPC_MAX][2];
538 u32 offset;
539 int gpc;
540
541 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
542 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
543 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
544 mmio_list(0x40800c, 0x00000000, 8, 1);
545 mmio_list(0x408010, 0x80000000, 0, 0);
546 mmio_list(0x419004, 0x00000000, 8, 1);
547 mmio_list(0x419008, 0x00000000, 0, 0);
548 mmio_list(0x4064cc, 0x80000000, 0, 0);
549 mmio_list(0x408004, 0x00000000, 8, 0);
550 mmio_list(0x408008, 0x80000030, 0, 0);
551 mmio_list(0x418808, 0x00000000, 8, 0);
552 mmio_list(0x41880c, 0x80000030, 0, 0);
553 mmio_list(0x4064c8, 0x00c20200, 0, 0);
554 mmio_list(0x418810, 0x80000000, 12, 2);
555 mmio_list(0x419848, 0x10000000, 12, 2);
556
557 mmio_list(0x405830, 0x02180648, 0, 0);
558 mmio_list(0x4064c4, 0x0192ffff, 0, 0);
559
560 for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
561 u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
562 u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
563 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
564 magic[gpc][1] = 0x00000000 | (magic1 << 16);
565 offset += 0x0324 * priv->tpc_nr[gpc];
566 }
567
568 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
569 mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
570 mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
571 offset += 0x07ff * priv->tpc_nr[gpc];
572 }
573
574 mmio_list(0x17e91c, 0x0b040a0b, 0, 0);
575 mmio_list(0x17e920, 0x00090d08, 0, 0);
576}
577
578struct nouveau_oclass * 534struct nouveau_oclass *
579nv108_grctx_oclass = &(struct nvc0_grctx_oclass) { 535nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
580 .base.handle = NV_ENGCTX(GR, 0x08), 536 .base.handle = NV_ENGCTX(GR, 0x08),
@@ -587,7 +543,6 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
587 .wr32 = _nouveau_graph_context_wr32, 543 .wr32 = _nouveau_graph_context_wr32,
588 }, 544 },
589 .main = nve4_grctx_generate_main, 545 .main = nve4_grctx_generate_main,
590 .mods = nv108_grctx_generate_mods,
591 .unkn = nve4_grctx_generate_unkn, 546 .unkn = nve4_grctx_generate_unkn,
592 .hub = nv108_grctx_pack_hub, 547 .hub = nv108_grctx_pack_hub,
593 .gpc = nv108_grctx_pack_gpc, 548 .gpc = nv108_grctx_pack_gpc,
@@ -596,4 +551,15 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
596 .ppc = nv108_grctx_pack_ppc, 551 .ppc = nv108_grctx_pack_ppc,
597 .icmd = nv108_grctx_pack_icmd, 552 .icmd = nv108_grctx_pack_icmd,
598 .mthd = nvf0_grctx_pack_mthd, 553 .mthd = nvf0_grctx_pack_mthd,
554 .bundle = nve4_grctx_generate_bundle,
555 .bundle_size = 0x3000,
556 .bundle_min_gpm_fifo_depth = 0xc2,
557 .bundle_token_limit = 0x200,
558 .pagepool = nve4_grctx_generate_pagepool,
559 .pagepool_size = 0x8000,
560 .attrib = nvd7_grctx_generate_attrib,
561 .attrib_nr_max = 0x324,
562 .attrib_nr = 0x218,
563 .alpha_nr_max = 0x7ff,
564 .alpha_nr = 0x648,
599}.base; 565}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 833a96508c4e..b8e5fe60a1eb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -982,34 +982,93 @@ nvc0_grctx_pack_tpc[] = {
982 * PGRAPH context implementation 982 * PGRAPH context implementation
983 ******************************************************************************/ 983 ******************************************************************************/
984 984
985int
986nvc0_grctx_mmio_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
987{
988 if (info->data) {
989 info->buffer[info->buffer_nr] = round_up(info->addr, align);
990 info->addr = info->buffer[info->buffer_nr] + size;
991 info->data->size = size;
992 info->data->align = align;
993 info->data->access = access;
994 info->data++;
995 return info->buffer_nr++;
996 }
997 return -1;
998}
999
1000void
1001nvc0_grctx_mmio_item(struct nvc0_grctx *info, u32 addr, u32 data,
1002 int shift, int buffer)
1003{
1004 if (info->data) {
1005 if (shift >= 0) {
1006 info->mmio->addr = addr;
1007 info->mmio->data = data;
1008 info->mmio->shift = shift;
1009 info->mmio->buffer = buffer;
1010 if (buffer >= 0)
1011 data |= info->buffer[buffer] >> shift;
1012 info->mmio++;
1013 } else
1014 return;
1015 } else {
1016 if (buffer >= 0)
1017 return;
1018 }
1019
1020 nv_wr32(info->priv, addr, data);
1021}
1022
1023void
1024nvc0_grctx_generate_bundle(struct nvc0_grctx *info)
1025{
1026 const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
1027 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
1028 const int s = 8;
1029 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
1030 mmio_refn(info, 0x408004, 0x00000000, s, b);
1031 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
1032 mmio_refn(info, 0x418808, 0x00000000, s, b);
1033 mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b);
1034}
1035
985void 1036void
986nvc0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 1037nvc0_grctx_generate_pagepool(struct nvc0_grctx *info)
987{ 1038{
1039 const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
1040 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
1041 const int s = 8;
1042 const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
1043 mmio_refn(info, 0x40800c, 0x00000000, s, b);
1044 mmio_wr32(info, 0x408010, 0x80000000);
1045 mmio_refn(info, 0x419004, 0x00000000, s, b);
1046 mmio_wr32(info, 0x419008, 0x00000000);
1047}
1048
1049void
1050nvc0_grctx_generate_attrib(struct nvc0_grctx *info)
1051{
1052 struct nvc0_graph_priv *priv = info->priv;
1053 const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
1054 const u32 attrib = impl->attrib_nr;
1055 const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
1056 const u32 access = NV_MEM_ACCESS_RW;
1057 const int s = 12;
1058 const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
988 int gpc, tpc; 1059 int gpc, tpc;
989 u32 offset; 1060 u32 bo = 0;
990 1061
991 mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 1062 mmio_refn(info, 0x418810, 0x80000000, s, b);
992 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 1063 mmio_refn(info, 0x419848, 0x10000000, s, b);
993 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); 1064 mmio_wr32(info, 0x405830, (attrib << 16));
994 1065
995 mmio_list(0x408004, 0x00000000, 8, 0); 1066 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
996 mmio_list(0x408008, 0x80000018, 0, 0);
997 mmio_list(0x40800c, 0x00000000, 8, 1);
998 mmio_list(0x408010, 0x80000000, 0, 0);
999 mmio_list(0x418810, 0x80000000, 12, 2);
1000 mmio_list(0x419848, 0x10000000, 12, 2);
1001 mmio_list(0x419004, 0x00000000, 8, 1);
1002 mmio_list(0x419008, 0x00000000, 0, 0);
1003 mmio_list(0x418808, 0x00000000, 8, 0);
1004 mmio_list(0x41880c, 0x80000018, 0, 0);
1005
1006 mmio_list(0x405830, 0x02180000, 0, 0);
1007
1008 for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
1009 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { 1067 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1010 u32 addr = TPC_UNIT(gpc, tpc, 0x0520); 1068 const u32 o = TPC_UNIT(gpc, tpc, 0x0520);
1011 mmio_list(addr, 0x02180000 | offset, 0, 0); 1069 mmio_skip(info, o, (attrib << 16) | ++bo);
1012 offset += 0x0324; 1070 mmio_wr32(info, o, (attrib << 16) | --bo);
1071 bo += impl->attrib_nr_max;
1013 } 1072 }
1014 } 1073 }
1015} 1074}
@@ -1170,7 +1229,7 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
1170{ 1229{
1171 struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; 1230 struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
1172 1231
1173 nv_mask(priv, 0x000260, 0x00000001, 0x00000000); 1232 nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
1174 1233
1175 nvc0_graph_mmio(priv, oclass->hub); 1234 nvc0_graph_mmio(priv, oclass->hub);
1176 nvc0_graph_mmio(priv, oclass->gpc); 1235 nvc0_graph_mmio(priv, oclass->gpc);
@@ -1180,7 +1239,9 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
1180 1239
1181 nv_wr32(priv, 0x404154, 0x00000000); 1240 nv_wr32(priv, 0x404154, 0x00000000);
1182 1241
1183 oclass->mods(priv, info); 1242 oclass->bundle(info);
1243 oclass->pagepool(info);
1244 oclass->attrib(info);
1184 oclass->unkn(priv); 1245 oclass->unkn(priv);
1185 1246
1186 nvc0_grctx_generate_tpcid(priv); 1247 nvc0_grctx_generate_tpcid(priv);
@@ -1192,7 +1253,7 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
1192 nvc0_graph_icmd(priv, oclass->icmd); 1253 nvc0_graph_icmd(priv, oclass->icmd);
1193 nv_wr32(priv, 0x404154, 0x00000400); 1254 nv_wr32(priv, 0x404154, 0x00000400);
1194 nvc0_graph_mthd(priv, oclass->mthd); 1255 nvc0_graph_mthd(priv, oclass->mthd);
1195 nv_mask(priv, 0x000260, 0x00000001, 0x00000001); 1256 nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
1196} 1257}
1197 1258
1198int 1259int
@@ -1308,7 +1369,6 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
1308 .wr32 = _nouveau_graph_context_wr32, 1369 .wr32 = _nouveau_graph_context_wr32,
1309 }, 1370 },
1310 .main = nvc0_grctx_generate_main, 1371 .main = nvc0_grctx_generate_main,
1311 .mods = nvc0_grctx_generate_mods,
1312 .unkn = nvc0_grctx_generate_unkn, 1372 .unkn = nvc0_grctx_generate_unkn,
1313 .hub = nvc0_grctx_pack_hub, 1373 .hub = nvc0_grctx_pack_hub,
1314 .gpc = nvc0_grctx_pack_gpc, 1374 .gpc = nvc0_grctx_pack_gpc,
@@ -1316,4 +1376,11 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
1316 .tpc = nvc0_grctx_pack_tpc, 1376 .tpc = nvc0_grctx_pack_tpc,
1317 .icmd = nvc0_grctx_pack_icmd, 1377 .icmd = nvc0_grctx_pack_icmd,
1318 .mthd = nvc0_grctx_pack_mthd, 1378 .mthd = nvc0_grctx_pack_mthd,
1379 .bundle = nvc0_grctx_generate_bundle,
1380 .bundle_size = 0x1800,
1381 .pagepool = nvc0_grctx_generate_pagepool,
1382 .pagepool_size = 0x8000,
1383 .attrib = nvc0_grctx_generate_attrib,
1384 .attrib_nr_max = 0x324,
1385 .attrib_nr = 0x218,
1319}.base; 1386}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
index 8da8b627b9d0..c776cd715e33 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
@@ -12,12 +12,19 @@ struct nvc0_grctx {
12 u64 addr; 12 u64 addr;
13}; 13};
14 14
15int nvc0_grctx_mmio_data(struct nvc0_grctx *, u32 size, u32 align, u32 access);
16void nvc0_grctx_mmio_item(struct nvc0_grctx *, u32 addr, u32 data, int s, int);
17
18#define mmio_vram(a,b,c,d) nvc0_grctx_mmio_data((a), (b), (c), (d))
19#define mmio_refn(a,b,c,d,e) nvc0_grctx_mmio_item((a), (b), (c), (d), (e))
20#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
21#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1)
22
15struct nvc0_grctx_oclass { 23struct nvc0_grctx_oclass {
16 struct nouveau_oclass base; 24 struct nouveau_oclass base;
17 /* main context generation function */ 25 /* main context generation function */
18 void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *); 26 void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *);
19 /* context-specific modify-on-first-load list generation function */ 27 /* context-specific modify-on-first-load list generation function */
20 void (*mods)(struct nvc0_graph_priv *, struct nvc0_grctx *);
21 void (*unkn)(struct nvc0_graph_priv *); 28 void (*unkn)(struct nvc0_graph_priv *);
22 /* mmio context data */ 29 /* mmio context data */
23 const struct nvc0_graph_pack *hub; 30 const struct nvc0_graph_pack *hub;
@@ -28,30 +35,34 @@ struct nvc0_grctx_oclass {
28 /* indirect context data, generated with icmds/mthds */ 35 /* indirect context data, generated with icmds/mthds */
29 const struct nvc0_graph_pack *icmd; 36 const struct nvc0_graph_pack *icmd;
30 const struct nvc0_graph_pack *mthd; 37 const struct nvc0_graph_pack *mthd;
38 /* bundle circular buffer */
39 void (*bundle)(struct nvc0_grctx *);
40 u32 bundle_size;
41 u32 bundle_min_gpm_fifo_depth;
42 u32 bundle_token_limit;
43 /* pagepool */
44 void (*pagepool)(struct nvc0_grctx *);
45 u32 pagepool_size;
46 /* attribute(/alpha) circular buffer */
47 void (*attrib)(struct nvc0_grctx *);
48 u32 attrib_nr_max;
49 u32 attrib_nr;
50 u32 alpha_nr_max;
51 u32 alpha_nr;
31}; 52};
32 53
33#define mmio_data(s,a,p) do { \ 54static inline const struct nvc0_grctx_oclass *
34 info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \ 55nvc0_grctx_impl(struct nvc0_graph_priv *priv)
35 info->addr = info->buffer[info->buffer_nr++] + (s); \ 56{
36 info->data->size = (s); \ 57 return (void *)nv_engine(priv)->cclass;
37 info->data->align = (a); \ 58}
38 info->data->access = (p); \
39 info->data++; \
40} while(0)
41
42#define mmio_list(r,d,s,b) do { \
43 info->mmio->addr = (r); \
44 info->mmio->data = (d); \
45 info->mmio->shift = (s); \
46 info->mmio->buffer = (b); \
47 info->mmio++; \
48 nv_wr32(priv, (r), (d) | ((s) ? (info->buffer[(b)] >> (s)) : 0)); \
49} while(0)
50 59
51extern struct nouveau_oclass *nvc0_grctx_oclass; 60extern struct nouveau_oclass *nvc0_grctx_oclass;
52int nvc0_grctx_generate(struct nvc0_graph_priv *); 61int nvc0_grctx_generate(struct nvc0_graph_priv *);
53void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); 62void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
54void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); 63void nvc0_grctx_generate_bundle(struct nvc0_grctx *);
64void nvc0_grctx_generate_pagepool(struct nvc0_grctx *);
65void nvc0_grctx_generate_attrib(struct nvc0_grctx *);
55void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *); 66void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *);
56void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *); 67void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *);
57void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *); 68void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *);
@@ -60,22 +71,27 @@ void nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *);
60void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *); 71void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *);
61 72
62extern struct nouveau_oclass *nvc1_grctx_oclass; 73extern struct nouveau_oclass *nvc1_grctx_oclass;
63void nvc1_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); 74void nvc1_grctx_generate_attrib(struct nvc0_grctx *);
64void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *); 75void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *);
65 76
66extern struct nouveau_oclass *nvc4_grctx_oclass; 77extern struct nouveau_oclass *nvc4_grctx_oclass;
67extern struct nouveau_oclass *nvc8_grctx_oclass; 78extern struct nouveau_oclass *nvc8_grctx_oclass;
79
68extern struct nouveau_oclass *nvd7_grctx_oclass; 80extern struct nouveau_oclass *nvd7_grctx_oclass;
81void nvd7_grctx_generate_attrib(struct nvc0_grctx *);
82
69extern struct nouveau_oclass *nvd9_grctx_oclass; 83extern struct nouveau_oclass *nvd9_grctx_oclass;
70 84
71extern struct nouveau_oclass *nve4_grctx_oclass; 85extern struct nouveau_oclass *nve4_grctx_oclass;
72extern struct nouveau_oclass *gk20a_grctx_oclass; 86extern struct nouveau_oclass *gk20a_grctx_oclass;
73void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); 87void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
74void nve4_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); 88void nve4_grctx_generate_bundle(struct nvc0_grctx *);
89void nve4_grctx_generate_pagepool(struct nvc0_grctx *);
75void nve4_grctx_generate_unkn(struct nvc0_graph_priv *); 90void nve4_grctx_generate_unkn(struct nvc0_graph_priv *);
76void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *); 91void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *);
77 92
78extern struct nouveau_oclass *nvf0_grctx_oclass; 93extern struct nouveau_oclass *nvf0_grctx_oclass;
94extern struct nouveau_oclass *gk110b_grctx_oclass;
79extern struct nouveau_oclass *nv108_grctx_oclass; 95extern struct nouveau_oclass *nv108_grctx_oclass;
80extern struct nouveau_oclass *gm107_grctx_oclass; 96extern struct nouveau_oclass *gm107_grctx_oclass;
81 97
@@ -160,16 +176,23 @@ extern const struct nvc0_graph_pack nve4_grctx_pack_ppc[];
160extern const struct nvc0_graph_pack nve4_grctx_pack_icmd[]; 176extern const struct nvc0_graph_pack nve4_grctx_pack_icmd[];
161extern const struct nvc0_graph_init nve4_grctx_init_a097_0[]; 177extern const struct nvc0_graph_init nve4_grctx_init_a097_0[];
162 178
179extern const struct nvc0_graph_pack nvf0_grctx_pack_icmd[];
180
163extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[]; 181extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[];
164 182
183extern const struct nvc0_graph_pack nvf0_grctx_pack_hub[];
165extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[]; 184extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[];
166extern const struct nvc0_graph_init nvf0_grctx_init_cwd_0[]; 185extern const struct nvc0_graph_init nvf0_grctx_init_cwd_0[];
167 186
187extern const struct nvc0_graph_pack nvf0_grctx_pack_gpc[];
168extern const struct nvc0_graph_init nvf0_grctx_init_gpc_unk_2[]; 188extern const struct nvc0_graph_init nvf0_grctx_init_gpc_unk_2[];
169 189
190extern const struct nvc0_graph_init nvf0_grctx_init_tex_0[];
170extern const struct nvc0_graph_init nvf0_grctx_init_mpc_0[]; 191extern const struct nvc0_graph_init nvf0_grctx_init_mpc_0[];
171extern const struct nvc0_graph_init nvf0_grctx_init_l1c_0[]; 192extern const struct nvc0_graph_init nvf0_grctx_init_l1c_0[];
172 193
194extern const struct nvc0_graph_pack nvf0_grctx_pack_ppc[];
195
173extern const struct nvc0_graph_init nv108_grctx_init_rstr2d_0[]; 196extern const struct nvc0_graph_init nv108_grctx_init_rstr2d_0[];
174 197
175extern const struct nvc0_graph_init nv108_grctx_init_prop_0[]; 198extern const struct nvc0_graph_init nv108_grctx_init_prop_0[];
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
index 24a92c569c0a..c6ba8fed18f1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
@@ -727,38 +727,38 @@ nvc1_grctx_pack_tpc[] = {
727 ******************************************************************************/ 727 ******************************************************************************/
728 728
729void 729void
730nvc1_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 730nvc1_grctx_generate_attrib(struct nvc0_grctx *info)
731{ 731{
732 struct nvc0_graph_priv *priv = info->priv;
733 const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
734 const u32 alpha = impl->alpha_nr;
735 const u32 beta = impl->attrib_nr;
736 const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
737 const u32 access = NV_MEM_ACCESS_RW;
738 const int s = 12;
739 const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
740 const int timeslice_mode = 1;
741 const int max_batches = 0xffff;
742 u32 bo = 0;
743 u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
732 int gpc, tpc; 744 int gpc, tpc;
733 u32 offset;
734 745
735 mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 746 mmio_refn(info, 0x418810, 0x80000000, s, b);
736 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 747 mmio_refn(info, 0x419848, 0x10000000, s, b);
737 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); 748 mmio_wr32(info, 0x405830, (beta << 16) | alpha);
738 mmio_list(0x408004, 0x00000000, 8, 0); 749 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
739 mmio_list(0x408008, 0x80000018, 0, 0);
740 mmio_list(0x40800c, 0x00000000, 8, 1);
741 mmio_list(0x408010, 0x80000000, 0, 0);
742 mmio_list(0x418810, 0x80000000, 12, 2);
743 mmio_list(0x419848, 0x10000000, 12, 2);
744 mmio_list(0x419004, 0x00000000, 8, 1);
745 mmio_list(0x419008, 0x00000000, 0, 0);
746 mmio_list(0x418808, 0x00000000, 8, 0);
747 mmio_list(0x41880c, 0x80000018, 0, 0);
748 750
749 mmio_list(0x405830, 0x02180218, 0, 0); 751 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
750 mmio_list(0x4064c4, 0x0086ffff, 0, 0);
751
752 for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
753 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
754 u32 addr = TPC_UNIT(gpc, tpc, 0x0520);
755 mmio_list(addr, 0x12180000 | offset, 0, 0);
756 offset += 0x0324;
757 }
758 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { 752 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
759 u32 addr = TPC_UNIT(gpc, tpc, 0x0544); 753 const u32 a = alpha;
760 mmio_list(addr, 0x02180000 | offset, 0, 0); 754 const u32 b = beta;
761 offset += 0x0324; 755 const u32 t = timeslice_mode;
756 const u32 o = TPC_UNIT(gpc, tpc, 0x500);
757 mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
758 mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
759 bo += impl->attrib_nr_max;
760 mmio_wr32(info, o + 0x44, (a << 16) | ao);
761 ao += impl->alpha_nr_max;
762 } 762 }
763 } 763 }
764} 764}
@@ -786,7 +786,6 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
786 .wr32 = _nouveau_graph_context_wr32, 786 .wr32 = _nouveau_graph_context_wr32,
787 }, 787 },
788 .main = nvc0_grctx_generate_main, 788 .main = nvc0_grctx_generate_main,
789 .mods = nvc1_grctx_generate_mods,
790 .unkn = nvc1_grctx_generate_unkn, 789 .unkn = nvc1_grctx_generate_unkn,
791 .hub = nvc1_grctx_pack_hub, 790 .hub = nvc1_grctx_pack_hub,
792 .gpc = nvc1_grctx_pack_gpc, 791 .gpc = nvc1_grctx_pack_gpc,
@@ -794,4 +793,13 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
794 .tpc = nvc1_grctx_pack_tpc, 793 .tpc = nvc1_grctx_pack_tpc,
795 .icmd = nvc1_grctx_pack_icmd, 794 .icmd = nvc1_grctx_pack_icmd,
796 .mthd = nvc1_grctx_pack_mthd, 795 .mthd = nvc1_grctx_pack_mthd,
796 .bundle = nvc0_grctx_generate_bundle,
797 .bundle_size = 0x1800,
798 .pagepool = nvc0_grctx_generate_pagepool,
799 .pagepool_size = 0x8000,
800 .attrib = nvc1_grctx_generate_attrib,
801 .attrib_nr_max = 0x324,
802 .attrib_nr = 0x218,
803 .alpha_nr_max = 0x324,
804 .alpha_nr = 0x218,
797}.base; 805}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
index e11ed5538193..41705c60cc47 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
@@ -92,7 +92,6 @@ nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) {
92 .wr32 = _nouveau_graph_context_wr32, 92 .wr32 = _nouveau_graph_context_wr32,
93 }, 93 },
94 .main = nvc0_grctx_generate_main, 94 .main = nvc0_grctx_generate_main,
95 .mods = nvc0_grctx_generate_mods,
96 .unkn = nvc0_grctx_generate_unkn, 95 .unkn = nvc0_grctx_generate_unkn,
97 .hub = nvc0_grctx_pack_hub, 96 .hub = nvc0_grctx_pack_hub,
98 .gpc = nvc0_grctx_pack_gpc, 97 .gpc = nvc0_grctx_pack_gpc,
@@ -100,4 +99,11 @@ nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) {
100 .tpc = nvc4_grctx_pack_tpc, 99 .tpc = nvc4_grctx_pack_tpc,
101 .icmd = nvc0_grctx_pack_icmd, 100 .icmd = nvc0_grctx_pack_icmd,
102 .mthd = nvc0_grctx_pack_mthd, 101 .mthd = nvc0_grctx_pack_mthd,
102 .bundle = nvc0_grctx_generate_bundle,
103 .bundle_size = 0x1800,
104 .pagepool = nvc0_grctx_generate_pagepool,
105 .pagepool_size = 0x8000,
106 .attrib = nvc0_grctx_generate_attrib,
107 .attrib_nr_max = 0x324,
108 .attrib_nr = 0x218,
103}.base; 109}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
index feebd58dfe8d..8f804cd8f9c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
@@ -343,7 +343,6 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
343 .wr32 = _nouveau_graph_context_wr32, 343 .wr32 = _nouveau_graph_context_wr32,
344 }, 344 },
345 .main = nvc0_grctx_generate_main, 345 .main = nvc0_grctx_generate_main,
346 .mods = nvc0_grctx_generate_mods,
347 .unkn = nvc0_grctx_generate_unkn, 346 .unkn = nvc0_grctx_generate_unkn,
348 .hub = nvc0_grctx_pack_hub, 347 .hub = nvc0_grctx_pack_hub,
349 .gpc = nvc8_grctx_pack_gpc, 348 .gpc = nvc8_grctx_pack_gpc,
@@ -351,4 +350,11 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
351 .tpc = nvc0_grctx_pack_tpc, 350 .tpc = nvc0_grctx_pack_tpc,
352 .icmd = nvc8_grctx_pack_icmd, 351 .icmd = nvc8_grctx_pack_icmd,
353 .mthd = nvc8_grctx_pack_mthd, 352 .mthd = nvc8_grctx_pack_mthd,
353 .bundle = nvc0_grctx_generate_bundle,
354 .bundle_size = 0x1800,
355 .pagepool = nvc0_grctx_generate_pagepool,
356 .pagepool_size = 0x8000,
357 .attrib = nvc0_grctx_generate_attrib,
358 .attrib_nr_max = 0x324,
359 .attrib_nr = 0x218,
354}.base; 360}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
index 1dbc8d7f2e86..fcf534fd9e65 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
@@ -177,44 +177,41 @@ nvd7_grctx_pack_ppc[] = {
177 * PGRAPH context implementation 177 * PGRAPH context implementation
178 ******************************************************************************/ 178 ******************************************************************************/
179 179
180static void 180void
181nvd7_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 181nvd7_grctx_generate_attrib(struct nvc0_grctx *info)
182{ 182{
183 u32 magic[GPC_MAX][2]; 183 struct nvc0_graph_priv *priv = info->priv;
184 u32 offset; 184 const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
185 int gpc; 185 const u32 alpha = impl->alpha_nr;
186 186 const u32 beta = impl->attrib_nr;
187 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 187 const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
188 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 188 const u32 access = NV_MEM_ACCESS_RW;
189 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); 189 const int s = 12;
190 mmio_list(0x40800c, 0x00000000, 8, 1); 190 const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
191 mmio_list(0x408010, 0x80000000, 0, 0); 191 const int timeslice_mode = 1;
192 mmio_list(0x419004, 0x00000000, 8, 1); 192 const int max_batches = 0xffff;
193 mmio_list(0x419008, 0x00000000, 0, 0); 193 u32 bo = 0;
194 mmio_list(0x408004, 0x00000000, 8, 0); 194 u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
195 mmio_list(0x408008, 0x80000018, 0, 0); 195 int gpc, ppc;
196 mmio_list(0x418808, 0x00000000, 8, 0);
197 mmio_list(0x41880c, 0x80000018, 0, 0);
198 mmio_list(0x418810, 0x80000000, 12, 2);
199 mmio_list(0x419848, 0x10000000, 12, 2);
200 196
201 mmio_list(0x405830, 0x02180324, 0, 0); 197 mmio_refn(info, 0x418810, 0x80000000, s, b);
202 mmio_list(0x4064c4, 0x00c9ffff, 0, 0); 198 mmio_refn(info, 0x419848, 0x10000000, s, b);
203 199 mmio_wr32(info, 0x405830, (beta << 16) | alpha);
204 for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { 200 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
205 u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
206 u16 magic1 = 0x0324 * priv->tpc_nr[gpc];
207 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
208 magic[gpc][1] = 0x00000000 | (magic1 << 16);
209 offset += 0x0324 * priv->tpc_nr[gpc];
210 }
211 201
212 for (gpc = 0; gpc < priv->gpc_nr; gpc++) { 202 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
213 mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); 203 for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) {
214 mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); 204 const u32 a = alpha * priv->ppc_tpc_nr[gpc][ppc];
215 offset += 0x07ff * priv->tpc_nr[gpc]; 205 const u32 b = beta * priv->ppc_tpc_nr[gpc][ppc];
206 const u32 t = timeslice_mode;
207 const u32 o = PPC_UNIT(gpc, ppc, 0);
208 mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
209 mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
210 bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
211 mmio_wr32(info, o + 0xe4, (a << 16) | ao);
212 ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
213 }
216 } 214 }
217 mmio_list(0x17e91c, 0x03060609, 0, 0); /* different from kepler */
218} 215}
219 216
220void 217void
@@ -223,7 +220,7 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
223 struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; 220 struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
224 int i; 221 int i;
225 222
226 nv_mask(priv, 0x000260, 0x00000001, 0x00000000); 223 nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
227 224
228 nvc0_graph_mmio(priv, oclass->hub); 225 nvc0_graph_mmio(priv, oclass->hub);
229 nvc0_graph_mmio(priv, oclass->gpc); 226 nvc0_graph_mmio(priv, oclass->gpc);
@@ -233,7 +230,9 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
233 230
234 nv_wr32(priv, 0x404154, 0x00000000); 231 nv_wr32(priv, 0x404154, 0x00000000);
235 232
236 oclass->mods(priv, info); 233 oclass->bundle(info);
234 oclass->pagepool(info);
235 oclass->attrib(info);
237 oclass->unkn(priv); 236 oclass->unkn(priv);
238 237
239 nvc0_grctx_generate_tpcid(priv); 238 nvc0_grctx_generate_tpcid(priv);
@@ -248,7 +247,7 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
248 nvc0_graph_icmd(priv, oclass->icmd); 247 nvc0_graph_icmd(priv, oclass->icmd);
249 nv_wr32(priv, 0x404154, 0x00000400); 248 nv_wr32(priv, 0x404154, 0x00000400);
250 nvc0_graph_mthd(priv, oclass->mthd); 249 nvc0_graph_mthd(priv, oclass->mthd);
251 nv_mask(priv, 0x000260, 0x00000001, 0x00000001); 250 nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
252} 251}
253 252
254struct nouveau_oclass * 253struct nouveau_oclass *
@@ -263,7 +262,6 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
263 .wr32 = _nouveau_graph_context_wr32, 262 .wr32 = _nouveau_graph_context_wr32,
264 }, 263 },
265 .main = nvd7_grctx_generate_main, 264 .main = nvd7_grctx_generate_main,
266 .mods = nvd7_grctx_generate_mods,
267 .unkn = nve4_grctx_generate_unkn, 265 .unkn = nve4_grctx_generate_unkn,
268 .hub = nvd7_grctx_pack_hub, 266 .hub = nvd7_grctx_pack_hub,
269 .gpc = nvd7_grctx_pack_gpc, 267 .gpc = nvd7_grctx_pack_gpc,
@@ -272,4 +270,13 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
272 .ppc = nvd7_grctx_pack_ppc, 270 .ppc = nvd7_grctx_pack_ppc,
273 .icmd = nvd9_grctx_pack_icmd, 271 .icmd = nvd9_grctx_pack_icmd,
274 .mthd = nvd9_grctx_pack_mthd, 272 .mthd = nvd9_grctx_pack_mthd,
273 .bundle = nvc0_grctx_generate_bundle,
274 .bundle_size = 0x1800,
275 .pagepool = nvc0_grctx_generate_pagepool,
276 .pagepool_size = 0x8000,
277 .attrib = nvd7_grctx_generate_attrib,
278 .attrib_nr_max = 0x324,
279 .attrib_nr = 0x218,
280 .alpha_nr_max = 0x7ff,
281 .alpha_nr = 0x324,
275}.base; 282}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
index c665fb7e4660..b9a301b6fd9f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
@@ -511,7 +511,6 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
511 .wr32 = _nouveau_graph_context_wr32, 511 .wr32 = _nouveau_graph_context_wr32,
512 }, 512 },
513 .main = nvc0_grctx_generate_main, 513 .main = nvc0_grctx_generate_main,
514 .mods = nvc1_grctx_generate_mods,
515 .unkn = nvc1_grctx_generate_unkn, 514 .unkn = nvc1_grctx_generate_unkn,
516 .hub = nvd9_grctx_pack_hub, 515 .hub = nvd9_grctx_pack_hub,
517 .gpc = nvd9_grctx_pack_gpc, 516 .gpc = nvd9_grctx_pack_gpc,
@@ -519,4 +518,13 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
519 .tpc = nvd9_grctx_pack_tpc, 518 .tpc = nvd9_grctx_pack_tpc,
520 .icmd = nvd9_grctx_pack_icmd, 519 .icmd = nvd9_grctx_pack_icmd,
521 .mthd = nvd9_grctx_pack_mthd, 520 .mthd = nvd9_grctx_pack_mthd,
521 .bundle = nvc0_grctx_generate_bundle,
522 .bundle_size = 0x1800,
523 .pagepool = nvc0_grctx_generate_pagepool,
524 .pagepool_size = 0x8000,
525 .attrib = nvc1_grctx_generate_attrib,
526 .attrib_nr_max = 0x324,
527 .attrib_nr = 0x218,
528 .alpha_nr_max = 0x324,
529 .alpha_nr = 0x218,
522}.base; 530}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
index c5b249238587..ccac2ee1a1cb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
@@ -839,47 +839,34 @@ nve4_grctx_pack_ppc[] = {
839 ******************************************************************************/ 839 ******************************************************************************/
840 840
841void 841void
842nve4_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) 842nve4_grctx_generate_bundle(struct nvc0_grctx *info)
843{ 843{
844 u32 magic[GPC_MAX][2]; 844 const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
845 u32 offset; 845 const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
846 int gpc; 846 impl->bundle_size / 0x20);
847 847 const u32 token_limit = impl->bundle_token_limit;
848 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 848 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
849 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 849 const int s = 8;
850 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); 850 const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
851 mmio_list(0x40800c, 0x00000000, 8, 1); 851 mmio_refn(info, 0x408004, 0x00000000, s, b);
852 mmio_list(0x408010, 0x80000000, 0, 0); 852 mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
853 mmio_list(0x419004, 0x00000000, 8, 1); 853 mmio_refn(info, 0x418808, 0x00000000, s, b);
854 mmio_list(0x419008, 0x00000000, 0, 0); 854 mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b);
855 mmio_list(0x4064cc, 0x80000000, 0, 0); 855 mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
856 mmio_list(0x408004, 0x00000000, 8, 0); 856}
857 mmio_list(0x408008, 0x80000030, 0, 0);
858 mmio_list(0x418808, 0x00000000, 8, 0);
859 mmio_list(0x41880c, 0x80000030, 0, 0);
860 mmio_list(0x4064c8, 0x01800600, 0, 0);
861 mmio_list(0x418810, 0x80000000, 12, 2);
862 mmio_list(0x419848, 0x10000000, 12, 2);
863
864 mmio_list(0x405830, 0x02180648, 0, 0);
865 mmio_list(0x4064c4, 0x0192ffff, 0, 0);
866
867 for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
868 u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
869 u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
870 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
871 magic[gpc][1] = 0x00000000 | (magic1 << 16);
872 offset += 0x0324 * priv->tpc_nr[gpc];
873 }
874
875 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
876 mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
877 mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
878 offset += 0x07ff * priv->tpc_nr[gpc];
879 }
880 857
881 mmio_list(0x17e91c, 0x06060609, 0, 0); 858void
882 mmio_list(0x17e920, 0x00090a05, 0, 0); 859nve4_grctx_generate_pagepool(struct nvc0_grctx *info)
860{
861 const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
862 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
863 const int s = 8;
864 const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
865 mmio_refn(info, 0x40800c, 0x00000000, s, b);
866 mmio_wr32(info, 0x408010, 0x80000000);
867 mmio_refn(info, 0x419004, 0x00000000, s, b);
868 mmio_wr32(info, 0x419008, 0x00000000);
869 mmio_wr32(info, 0x4064cc, 0x80000000);
883} 870}
884 871
885void 872void
@@ -957,7 +944,7 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
957 struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; 944 struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
958 int i; 945 int i;
959 946
960 nv_mask(priv, 0x000260, 0x00000001, 0x00000000); 947 nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
961 948
962 nvc0_graph_mmio(priv, oclass->hub); 949 nvc0_graph_mmio(priv, oclass->hub);
963 nvc0_graph_mmio(priv, oclass->gpc); 950 nvc0_graph_mmio(priv, oclass->gpc);
@@ -967,7 +954,9 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
967 954
968 nv_wr32(priv, 0x404154, 0x00000000); 955 nv_wr32(priv, 0x404154, 0x00000000);
969 956
970 oclass->mods(priv, info); 957 oclass->bundle(info);
958 oclass->pagepool(info);
959 oclass->attrib(info);
971 oclass->unkn(priv); 960 oclass->unkn(priv);
972 961
973 nvc0_grctx_generate_tpcid(priv); 962 nvc0_grctx_generate_tpcid(priv);
@@ -991,7 +980,7 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
991 nvc0_graph_icmd(priv, oclass->icmd); 980 nvc0_graph_icmd(priv, oclass->icmd);
992 nv_wr32(priv, 0x404154, 0x00000400); 981 nv_wr32(priv, 0x404154, 0x00000400);
993 nvc0_graph_mthd(priv, oclass->mthd); 982 nvc0_graph_mthd(priv, oclass->mthd);
994 nv_mask(priv, 0x000260, 0x00000001, 0x00000001); 983 nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
995 984
996 nv_mask(priv, 0x418800, 0x00200000, 0x00200000); 985 nv_mask(priv, 0x418800, 0x00200000, 0x00200000);
997 nv_mask(priv, 0x41be10, 0x00800000, 0x00800000); 986 nv_mask(priv, 0x41be10, 0x00800000, 0x00800000);
@@ -1009,7 +998,6 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
1009 .wr32 = _nouveau_graph_context_wr32, 998 .wr32 = _nouveau_graph_context_wr32,
1010 }, 999 },
1011 .main = nve4_grctx_generate_main, 1000 .main = nve4_grctx_generate_main,
1012 .mods = nve4_grctx_generate_mods,
1013 .unkn = nve4_grctx_generate_unkn, 1001 .unkn = nve4_grctx_generate_unkn,
1014 .hub = nve4_grctx_pack_hub, 1002 .hub = nve4_grctx_pack_hub,
1015 .gpc = nve4_grctx_pack_gpc, 1003 .gpc = nve4_grctx_pack_gpc,
@@ -1018,4 +1006,15 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
1018 .ppc = nve4_grctx_pack_ppc, 1006 .ppc = nve4_grctx_pack_ppc,
1019 .icmd = nve4_grctx_pack_icmd, 1007 .icmd = nve4_grctx_pack_icmd,
1020 .mthd = nve4_grctx_pack_mthd, 1008 .mthd = nve4_grctx_pack_mthd,
1009 .bundle = nve4_grctx_generate_bundle,
1010 .bundle_size = 0x3000,
1011 .bundle_min_gpm_fifo_depth = 0x180,
1012 .bundle_token_limit = 0x600,
1013 .pagepool = nve4_grctx_generate_pagepool,
1014 .pagepool_size = 0x8000,
1015 .attrib = nvd7_grctx_generate_attrib,
1016 .attrib_nr_max = 0x324,
1017 .attrib_nr = 0x218,
1018 .alpha_nr_max = 0x7ff,
1019 .alpha_nr = 0x648,
1021}.base; 1020}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
index dec03f04114d..e9b0dcf95a49 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
@@ -279,7 +279,7 @@ nvf0_grctx_init_icmd_0[] = {
279 {} 279 {}
280}; 280};
281 281
282static const struct nvc0_graph_pack 282const struct nvc0_graph_pack
283nvf0_grctx_pack_icmd[] = { 283nvf0_grctx_pack_icmd[] = {
284 { nvf0_grctx_init_icmd_0 }, 284 { nvf0_grctx_init_icmd_0 },
285 {} 285 {}
@@ -668,7 +668,7 @@ nvf0_grctx_init_be_0[] = {
668 {} 668 {}
669}; 669};
670 670
671static const struct nvc0_graph_pack 671const struct nvc0_graph_pack
672nvf0_grctx_pack_hub[] = { 672nvf0_grctx_pack_hub[] = {
673 { nvc0_grctx_init_main_0 }, 673 { nvc0_grctx_init_main_0 },
674 { nvf0_grctx_init_fe_0 }, 674 { nvf0_grctx_init_fe_0 },
@@ -704,7 +704,7 @@ nvf0_grctx_init_gpc_unk_2[] = {
704 {} 704 {}
705}; 705};
706 706
707static const struct nvc0_graph_pack 707const struct nvc0_graph_pack
708nvf0_grctx_pack_gpc[] = { 708nvf0_grctx_pack_gpc[] = {
709 { nvc0_grctx_init_gpc_unk_0 }, 709 { nvc0_grctx_init_gpc_unk_0 },
710 { nvd9_grctx_init_prop_0 }, 710 { nvd9_grctx_init_prop_0 },
@@ -718,7 +718,7 @@ nvf0_grctx_pack_gpc[] = {
718 {} 718 {}
719}; 719};
720 720
721static const struct nvc0_graph_init 721const struct nvc0_graph_init
722nvf0_grctx_init_tex_0[] = { 722nvf0_grctx_init_tex_0[] = {
723 { 0x419a00, 1, 0x04, 0x000000f0 }, 723 { 0x419a00, 1, 0x04, 0x000000f0 },
724 { 0x419a04, 1, 0x04, 0x00000001 }, 724 { 0x419a04, 1, 0x04, 0x00000001 },
@@ -797,7 +797,7 @@ nvf0_grctx_init_cbm_0[] = {
797 {} 797 {}
798}; 798};
799 799
800static const struct nvc0_graph_pack 800const struct nvc0_graph_pack
801nvf0_grctx_pack_ppc[] = { 801nvf0_grctx_pack_ppc[] = {
802 { nve4_grctx_init_pes_0 }, 802 { nve4_grctx_init_pes_0 },
803 { nvf0_grctx_init_cbm_0 }, 803 { nvf0_grctx_init_cbm_0 },
@@ -809,58 +809,6 @@ nvf0_grctx_pack_ppc[] = {
809 * PGRAPH context implementation 809 * PGRAPH context implementation
810 ******************************************************************************/ 810 ******************************************************************************/
811 811
812static void
813nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
814{
815 u32 magic[GPC_MAX][4];
816 u32 offset;
817 int gpc;
818
819 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
820 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
821 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
822 mmio_list(0x40800c, 0x00000000, 8, 1);
823 mmio_list(0x408010, 0x80000000, 0, 0);
824 mmio_list(0x419004, 0x00000000, 8, 1);
825 mmio_list(0x419008, 0x00000000, 0, 0);
826 mmio_list(0x4064cc, 0x80000000, 0, 0);
827 mmio_list(0x408004, 0x00000000, 8, 0);
828 mmio_list(0x408008, 0x80000030, 0, 0);
829 mmio_list(0x418808, 0x00000000, 8, 0);
830 mmio_list(0x41880c, 0x80000030, 0, 0);
831 mmio_list(0x4064c8, 0x01800600, 0, 0);
832 mmio_list(0x418810, 0x80000000, 12, 2);
833 mmio_list(0x419848, 0x10000000, 12, 2);
834
835 mmio_list(0x405830, 0x02180648, 0, 0);
836 mmio_list(0x4064c4, 0x0192ffff, 0, 0);
837
838 for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
839 u16 magic0 = 0x0218 * (priv->tpc_nr[gpc] - 1);
840 u16 magic1 = 0x0648 * (priv->tpc_nr[gpc] - 1);
841 u16 magic2 = 0x0218;
842 u16 magic3 = 0x0648;
843 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
844 magic[gpc][1] = 0x00000000 | (magic1 << 16);
845 offset += 0x0324 * (priv->tpc_nr[gpc] - 1);
846 magic[gpc][2] = 0x10000000 | (magic2 << 16) | offset;
847 magic[gpc][3] = 0x00000000 | (magic3 << 16);
848 offset += 0x0324;
849 }
850
851 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
852 mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
853 mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
854 offset += 0x07ff * (priv->tpc_nr[gpc] - 1);
855 mmio_list(GPC_UNIT(gpc, 0x32c0), magic[gpc][2], 0, 0);
856 mmio_list(GPC_UNIT(gpc, 0x32e4), magic[gpc][3] | offset, 0, 0);
857 offset += 0x07ff;
858 }
859
860 mmio_list(0x17e91c, 0x06060609, 0, 0);
861 mmio_list(0x17e920, 0x00090a05, 0, 0);
862}
863
864struct nouveau_oclass * 812struct nouveau_oclass *
865nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) { 813nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
866 .base.handle = NV_ENGCTX(GR, 0xf0), 814 .base.handle = NV_ENGCTX(GR, 0xf0),
@@ -873,7 +821,6 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
873 .wr32 = _nouveau_graph_context_wr32, 821 .wr32 = _nouveau_graph_context_wr32,
874 }, 822 },
875 .main = nve4_grctx_generate_main, 823 .main = nve4_grctx_generate_main,
876 .mods = nvf0_grctx_generate_mods,
877 .unkn = nve4_grctx_generate_unkn, 824 .unkn = nve4_grctx_generate_unkn,
878 .hub = nvf0_grctx_pack_hub, 825 .hub = nvf0_grctx_pack_hub,
879 .gpc = nvf0_grctx_pack_gpc, 826 .gpc = nvf0_grctx_pack_gpc,
@@ -882,4 +829,15 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
882 .ppc = nvf0_grctx_pack_ppc, 829 .ppc = nvf0_grctx_pack_ppc,
883 .icmd = nvf0_grctx_pack_icmd, 830 .icmd = nvf0_grctx_pack_icmd,
884 .mthd = nvf0_grctx_pack_mthd, 831 .mthd = nvf0_grctx_pack_mthd,
832 .bundle = nve4_grctx_generate_bundle,
833 .bundle_size = 0x3000,
834 .bundle_min_gpm_fifo_depth = 0x180,
835 .bundle_token_limit = 0x7c0,
836 .pagepool = nve4_grctx_generate_pagepool,
837 .pagepool_size = 0x8000,
838 .attrib = nvd7_grctx_generate_attrib,
839 .attrib_nr_max = 0x324,
840 .attrib_nr = 0x218,
841 .alpha_nr_max = 0x7ff,
842 .alpha_nr = 0x648,
885}.base; 843}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c
new file mode 100644
index 000000000000..d07b19dc168d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c
@@ -0,0 +1,117 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "nvc0.h"
26#include "ctxnvc0.h"
27
28/*******************************************************************************
29 * PGRAPH register lists
30 ******************************************************************************/
31
32static const struct nvc0_graph_init
33gk110b_graph_init_l1c_0[] = {
34 { 0x419c98, 1, 0x04, 0x00000000 },
35 { 0x419ca8, 1, 0x04, 0x00000000 },
36 { 0x419cb0, 1, 0x04, 0x09000000 },
37 { 0x419cb4, 1, 0x04, 0x00000000 },
38 { 0x419cb8, 1, 0x04, 0x00b08bea },
39 { 0x419c84, 1, 0x04, 0x00010384 },
40 { 0x419cbc, 1, 0x04, 0x281b3646 },
41 { 0x419cc0, 2, 0x04, 0x00000000 },
42 { 0x419c80, 1, 0x04, 0x00020230 },
43 { 0x419ccc, 2, 0x04, 0x00000000 },
44 {}
45};
46
47static const struct nvc0_graph_init
48gk110b_graph_init_sm_0[] = {
49 { 0x419e00, 1, 0x04, 0x00000080 },
50 { 0x419ea0, 1, 0x04, 0x00000000 },
51 { 0x419ee4, 1, 0x04, 0x00000000 },
52 { 0x419ea4, 1, 0x04, 0x00000100 },
53 { 0x419ea8, 1, 0x04, 0x00000000 },
54 { 0x419eb4, 1, 0x04, 0x00000000 },
55 { 0x419ebc, 2, 0x04, 0x00000000 },
56 { 0x419edc, 1, 0x04, 0x00000000 },
57 { 0x419f00, 1, 0x04, 0x00000000 },
58 { 0x419ed0, 1, 0x04, 0x00002616 },
59 { 0x419f74, 1, 0x04, 0x00015555 },
60 { 0x419f80, 4, 0x04, 0x00000000 },
61 {}
62};
63
64static const struct nvc0_graph_pack
65gk110b_graph_pack_mmio[] = {
66 { nve4_graph_init_main_0 },
67 { nvf0_graph_init_fe_0 },
68 { nvc0_graph_init_pri_0 },
69 { nvc0_graph_init_rstr2d_0 },
70 { nvd9_graph_init_pd_0 },
71 { nvf0_graph_init_ds_0 },
72 { nvc0_graph_init_scc_0 },
73 { nvf0_graph_init_sked_0 },
74 { nvf0_graph_init_cwd_0 },
75 { nvd9_graph_init_prop_0 },
76 { nvc1_graph_init_gpc_unk_0 },
77 { nvc0_graph_init_setup_0 },
78 { nvc0_graph_init_crstr_0 },
79 { nvc1_graph_init_setup_1 },
80 { nvc0_graph_init_zcull_0 },
81 { nvd9_graph_init_gpm_0 },
82 { nvf0_graph_init_gpc_unk_1 },
83 { nvc0_graph_init_gcc_0 },
84 { nve4_graph_init_tpccs_0 },
85 { nvf0_graph_init_tex_0 },
86 { nve4_graph_init_pe_0 },
87 { gk110b_graph_init_l1c_0 },
88 { nvc0_graph_init_mpc_0 },
89 { gk110b_graph_init_sm_0 },
90 { nvd7_graph_init_pes_0 },
91 { nvd7_graph_init_wwdx_0 },
92 { nvd7_graph_init_cbm_0 },
93 { nve4_graph_init_be_0 },
94 { nvc0_graph_init_fe_1 },
95 {}
96};
97
98/*******************************************************************************
99 * PGRAPH engine/subdev functions
100 ******************************************************************************/
101
102struct nouveau_oclass *
103gk110b_graph_oclass = &(struct nvc0_graph_oclass) {
104 .base.handle = NV_ENGINE(GR, 0xf1),
105 .base.ofuncs = &(struct nouveau_ofuncs) {
106 .ctor = nvc0_graph_ctor,
107 .dtor = nvc0_graph_dtor,
108 .init = nve4_graph_init,
109 .fini = nvf0_graph_fini,
110 },
111 .cclass = &gk110b_grctx_oclass,
112 .sclass = nvf0_graph_sclass,
113 .mmio = gk110b_graph_pack_mmio,
114 .fecs.ucode = &nvf0_graph_fecs_ucode,
115 .gpccs.ucode = &nvf0_graph_gpccs_ucode,
116 .ppc_nr = 2,
117}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
index 83048a56430d..7d0abe9f3fe7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
@@ -27,8 +27,8 @@ static struct nouveau_oclass
27gk20a_graph_sclass[] = { 27gk20a_graph_sclass[] = {
28 { 0x902d, &nouveau_object_ofuncs }, 28 { 0x902d, &nouveau_object_ofuncs },
29 { 0xa040, &nouveau_object_ofuncs }, 29 { 0xa040, &nouveau_object_ofuncs },
30 { 0xa297, &nouveau_object_ofuncs }, 30 { KEPLER_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
31 { 0xa0c0, &nouveau_object_ofuncs }, 31 { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
32 {} 32 {}
33}; 33};
34 34
@@ -39,9 +39,10 @@ gk20a_graph_oclass = &(struct nvc0_graph_oclass) {
39 .ctor = nvc0_graph_ctor, 39 .ctor = nvc0_graph_ctor,
40 .dtor = nvc0_graph_dtor, 40 .dtor = nvc0_graph_dtor,
41 .init = nve4_graph_init, 41 .init = nve4_graph_init,
42 .fini = nve4_graph_fini, 42 .fini = _nouveau_graph_fini,
43 }, 43 },
44 .cclass = &gk20a_grctx_oclass, 44 .cclass = &gk20a_grctx_oclass,
45 .sclass = gk20a_graph_sclass, 45 .sclass = gk20a_graph_sclass,
46 .mmio = nve4_graph_pack_mmio, 46 .mmio = nve4_graph_pack_mmio,
47 .ppc_nr = 1,
47}.base; 48}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
index 21c5f31d607f..4bdbdab2fd9a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
@@ -36,8 +36,8 @@ static struct nouveau_oclass
36gm107_graph_sclass[] = { 36gm107_graph_sclass[] = {
37 { 0x902d, &nouveau_object_ofuncs }, 37 { 0x902d, &nouveau_object_ofuncs },
38 { 0xa140, &nouveau_object_ofuncs }, 38 { 0xa140, &nouveau_object_ofuncs },
39 { 0xb097, &nouveau_object_ofuncs }, 39 { MAXWELL_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
40 { 0xb0c0, &nouveau_object_ofuncs }, 40 { MAXWELL_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
41 {} 41 {}
42}; 42};
43 43
@@ -425,6 +425,9 @@ gm107_graph_init(struct nouveau_object *object)
425 nv_wr32(priv, 0x400134, 0xffffffff); 425 nv_wr32(priv, 0x400134, 0xffffffff);
426 426
427 nv_wr32(priv, 0x400054, 0x2c350f63); 427 nv_wr32(priv, 0x400054, 0x2c350f63);
428
429 nvc0_graph_zbc_init(priv);
430
428 return nvc0_graph_init_ctxctl(priv); 431 return nvc0_graph_init_ctxctl(priv);
429} 432}
430 433
@@ -462,4 +465,5 @@ gm107_graph_oclass = &(struct nvc0_graph_oclass) {
462 .mmio = gm107_graph_pack_mmio, 465 .mmio = gm107_graph_pack_mmio,
463 .fecs.ucode = 0 ? &gm107_graph_fecs_ucode : NULL, 466 .fecs.ucode = 0 ? &gm107_graph_fecs_ucode : NULL,
464 .gpccs.ucode = &gm107_graph_gpccs_ucode, 467 .gpccs.ucode = &gm107_graph_gpccs_ucode,
468 .ppc_nr = 2,
465}.base; 469}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index ad13dcdd15f9..f70e2f67a4dd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -24,7 +24,6 @@
24 24
25#include <core/client.h> 25#include <core/client.h>
26#include <core/os.h> 26#include <core/os.h>
27#include <core/class.h>
28#include <core/handle.h> 27#include <core/handle.h>
29#include <core/namedb.h> 28#include <core/namedb.h>
30 29
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 4532f7e5618c..2b12b09683c8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -24,7 +24,6 @@
24 24
25#include <core/client.h> 25#include <core/client.h>
26#include <core/os.h> 26#include <core/os.h>
27#include <core/class.h>
28#include <core/handle.h> 27#include <core/handle.h>
29 28
30#include <subdev/fb.h> 29#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
index 00ea1a089822..2b0e8f48c029 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
@@ -33,7 +33,7 @@ static struct nouveau_oclass
33nv108_graph_sclass[] = { 33nv108_graph_sclass[] = {
34 { 0x902d, &nouveau_object_ofuncs }, 34 { 0x902d, &nouveau_object_ofuncs },
35 { 0xa140, &nouveau_object_ofuncs }, 35 { 0xa140, &nouveau_object_ofuncs },
36 { 0xa197, &nouveau_object_ofuncs }, 36 { KEPLER_B, &nvc0_fermi_ofuncs },
37 { 0xa1c0, &nouveau_object_ofuncs }, 37 { 0xa1c0, &nouveau_object_ofuncs },
38 {} 38 {}
39}; 39};
@@ -220,4 +220,5 @@ nv108_graph_oclass = &(struct nvc0_graph_oclass) {
220 .mmio = nv108_graph_pack_mmio, 220 .mmio = nv108_graph_pack_mmio,
221 .fecs.ucode = &nv108_graph_fecs_ucode, 221 .fecs.ucode = &nv108_graph_fecs_ucode,
222 .gpccs.ucode = &nv108_graph_gpccs_ucode, 222 .gpccs.ucode = &nv108_graph_gpccs_ucode,
223 .ppc_nr = 1,
223}.base; 224}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index d145e080899a..ceb9c746d94e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -1,6 +1,5 @@
1#include <core/client.h> 1#include <core/client.h>
2#include <core/os.h> 2#include <core/os.h>
3#include <core/class.h>
4#include <core/engctx.h> 3#include <core/engctx.h>
5#include <core/handle.h> 4#include <core/handle.h>
6#include <core/enum.h> 5#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
index 7a80d005a974..f8a6fdd7d5e8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -1,5 +1,4 @@
1#include <core/os.h> 1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h> 2#include <core/engctx.h>
4#include <core/enum.h> 3#include <core/enum.h>
5 4
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
index 3e1f32ee43d4..5de9caa2ef67 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -1,5 +1,4 @@
1#include <core/os.h> 1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h> 2#include <core/engctx.h>
4#include <core/enum.h> 3#include <core/enum.h>
5 4
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
index e451db32e92a..2f9dbc709389 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -1,5 +1,4 @@
1#include <core/os.h> 1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h> 2#include <core/engctx.h>
4#include <core/enum.h> 3#include <core/enum.h>
5 4
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
index 9385ac7b44a4..34dd26c70b64 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -1,5 +1,4 @@
1#include <core/os.h> 1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h> 2#include <core/engctx.h>
4#include <core/enum.h> 3#include <core/enum.h>
5 4
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
index 9ce84b73f86a..2fb5756d9f66 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -1,5 +1,4 @@
1#include <core/os.h> 1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h> 2#include <core/engctx.h>
4#include <core/enum.h> 3#include <core/enum.h>
5 4
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 6477fbf6a550..4f401174868d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -24,7 +24,6 @@
24 24
25#include <core/client.h> 25#include <core/client.h>
26#include <core/os.h> 26#include <core/os.h>
27#include <core/class.h>
28#include <core/handle.h> 27#include <core/handle.h>
29#include <core/engctx.h> 28#include <core/engctx.h>
30 29
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 20665c21d80e..38e0aa26f1cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27#include <core/client.h> 26#include <core/client.h>
28#include <core/handle.h> 27#include <core/handle.h>
29#include <core/engctx.h> 28#include <core/engctx.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index aa0838916354..db19191176fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -26,15 +26,226 @@
26#include "ctxnvc0.h" 26#include "ctxnvc0.h"
27 27
28/******************************************************************************* 28/*******************************************************************************
29 * Zero Bandwidth Clear
30 ******************************************************************************/
31
32static void
33nvc0_graph_zbc_clear_color(struct nvc0_graph_priv *priv, int zbc)
34{
35 if (priv->zbc_color[zbc].format) {
36 nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]);
37 nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]);
38 nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]);
39 nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]);
40 }
41 nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format);
42 nv_wr32(priv, 0x405820, zbc);
43 nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
44}
45
46static int
47nvc0_graph_zbc_color_get(struct nvc0_graph_priv *priv, int format,
48 const u32 ds[4], const u32 l2[4])
49{
50 struct nouveau_ltc *ltc = nouveau_ltc(priv);
51 int zbc = -ENOSPC, i;
52
53 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
54 if (priv->zbc_color[i].format) {
55 if (priv->zbc_color[i].format != format)
56 continue;
57 if (memcmp(priv->zbc_color[i].ds, ds, sizeof(
58 priv->zbc_color[i].ds)))
59 continue;
60 if (memcmp(priv->zbc_color[i].l2, l2, sizeof(
61 priv->zbc_color[i].l2))) {
62 WARN_ON(1);
63 return -EINVAL;
64 }
65 return i;
66 } else {
67 zbc = (zbc < 0) ? i : zbc;
68 }
69 }
70
71 memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds));
72 memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2));
73 priv->zbc_color[zbc].format = format;
74 ltc->zbc_color_get(ltc, zbc, l2);
75 nvc0_graph_zbc_clear_color(priv, zbc);
76 return zbc;
77}
78
79static void
80nvc0_graph_zbc_clear_depth(struct nvc0_graph_priv *priv, int zbc)
81{
82 if (priv->zbc_depth[zbc].format)
83 nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds);
84 nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format);
85 nv_wr32(priv, 0x405820, zbc);
86 nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
87}
88
89static int
90nvc0_graph_zbc_depth_get(struct nvc0_graph_priv *priv, int format,
91 const u32 ds, const u32 l2)
92{
93 struct nouveau_ltc *ltc = nouveau_ltc(priv);
94 int zbc = -ENOSPC, i;
95
96 for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
97 if (priv->zbc_depth[i].format) {
98 if (priv->zbc_depth[i].format != format)
99 continue;
100 if (priv->zbc_depth[i].ds != ds)
101 continue;
102 if (priv->zbc_depth[i].l2 != l2) {
103 WARN_ON(1);
104 return -EINVAL;
105 }
106 return i;
107 } else {
108 zbc = (zbc < 0) ? i : zbc;
109 }
110 }
111
112 priv->zbc_depth[zbc].format = format;
113 priv->zbc_depth[zbc].ds = ds;
114 priv->zbc_depth[zbc].l2 = l2;
115 ltc->zbc_depth_get(ltc, zbc, l2);
116 nvc0_graph_zbc_clear_depth(priv, zbc);
117 return zbc;
118}
119
120/*******************************************************************************
29 * Graphics object classes 121 * Graphics object classes
30 ******************************************************************************/ 122 ******************************************************************************/
31 123
124static int
125nvc0_fermi_mthd_zbc_color(struct nouveau_object *object, void *data, u32 size)
126{
127 struct nvc0_graph_priv *priv = (void *)object->engine;
128 union {
129 struct fermi_a_zbc_color_v0 v0;
130 } *args = data;
131 int ret;
132
133 if (nvif_unpack(args->v0, 0, 0, false)) {
134 switch (args->v0.format) {
135 case FERMI_A_ZBC_COLOR_V0_FMT_ZERO:
136 case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE:
137 case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32:
138 case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16:
139 case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16:
140 case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16:
141 case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16:
142 case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16:
143 case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8:
144 case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8:
145 case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10:
146 case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10:
147 case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8:
148 case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8:
149 case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8:
150 case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8:
151 case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
152 case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
153 case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
154 ret = nvc0_graph_zbc_color_get(priv, args->v0.format,
155 args->v0.ds,
156 args->v0.l2);
157 if (ret >= 0) {
158 args->v0.index = ret;
159 return 0;
160 }
161 break;
162 default:
163 return -EINVAL;
164 }
165 }
166
167 return ret;
168}
169
170static int
171nvc0_fermi_mthd_zbc_depth(struct nouveau_object *object, void *data, u32 size)
172{
173 struct nvc0_graph_priv *priv = (void *)object->engine;
174 union {
175 struct fermi_a_zbc_depth_v0 v0;
176 } *args = data;
177 int ret;
178
179 if (nvif_unpack(args->v0, 0, 0, false)) {
180 switch (args->v0.format) {
181 case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
182 ret = nvc0_graph_zbc_depth_get(priv, args->v0.format,
183 args->v0.ds,
184 args->v0.l2);
185 return (ret >= 0) ? 0 : -ENOSPC;
186 default:
187 return -EINVAL;
188 }
189 }
190
191 return ret;
192}
193
194static int
195nvc0_fermi_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size)
196{
197 switch (mthd) {
198 case FERMI_A_ZBC_COLOR:
199 return nvc0_fermi_mthd_zbc_color(object, data, size);
200 case FERMI_A_ZBC_DEPTH:
201 return nvc0_fermi_mthd_zbc_depth(object, data, size);
202 default:
203 break;
204 }
205 return -EINVAL;
206}
207
208struct nouveau_ofuncs
209nvc0_fermi_ofuncs = {
210 .ctor = _nouveau_object_ctor,
211 .dtor = nouveau_object_destroy,
212 .init = nouveau_object_init,
213 .fini = nouveau_object_fini,
214 .mthd = nvc0_fermi_mthd,
215};
216
217static int
218nvc0_graph_set_shader_exceptions(struct nouveau_object *object, u32 mthd,
219 void *pdata, u32 size)
220{
221 struct nvc0_graph_priv *priv = (void *)nv_engine(object);
222 if (size >= sizeof(u32)) {
223 u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
224 nv_wr32(priv, 0x419e44, data);
225 nv_wr32(priv, 0x419e4c, data);
226 return 0;
227 }
228 return -EINVAL;
229}
230
231struct nouveau_omthds
232nvc0_graph_9097_omthds[] = {
233 { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions },
234 {}
235};
236
237struct nouveau_omthds
238nvc0_graph_90c0_omthds[] = {
239 { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions },
240 {}
241};
242
32struct nouveau_oclass 243struct nouveau_oclass
33nvc0_graph_sclass[] = { 244nvc0_graph_sclass[] = {
34 { 0x902d, &nouveau_object_ofuncs }, 245 { 0x902d, &nouveau_object_ofuncs },
35 { 0x9039, &nouveau_object_ofuncs }, 246 { 0x9039, &nouveau_object_ofuncs },
36 { 0x9097, &nouveau_object_ofuncs }, 247 { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
37 { 0x90c0, &nouveau_object_ofuncs }, 248 { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
38 {} 249 {}
39}; 250};
40 251
@@ -98,7 +309,7 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
98 u32 addr = mmio->addr; 309 u32 addr = mmio->addr;
99 u32 data = mmio->data; 310 u32 data = mmio->data;
100 311
101 if (mmio->shift) { 312 if (mmio->buffer >= 0) {
102 u64 info = chan->data[mmio->buffer].vma.offset; 313 u64 info = chan->data[mmio->buffer].vma.offset;
103 data |= info >> mmio->shift; 314 data |= info >> mmio->shift;
104 } 315 }
@@ -407,6 +618,35 @@ nvc0_graph_pack_mmio[] = {
407 ******************************************************************************/ 618 ******************************************************************************/
408 619
409void 620void
621nvc0_graph_zbc_init(struct nvc0_graph_priv *priv)
622{
623 const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
624 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
625 const u32 one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
626 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff };
627 const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
628 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
629 const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
630 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
631 struct nouveau_ltc *ltc = nouveau_ltc(priv);
632 int index;
633
634 if (!priv->zbc_color[0].format) {
635 nvc0_graph_zbc_color_get(priv, 1, & zero[0], &zero[4]);
636 nvc0_graph_zbc_color_get(priv, 2, & one[0], &one[4]);
637 nvc0_graph_zbc_color_get(priv, 4, &f32_0[0], &f32_0[4]);
638 nvc0_graph_zbc_color_get(priv, 4, &f32_1[0], &f32_1[4]);
639 nvc0_graph_zbc_depth_get(priv, 1, 0x00000000, 0x00000000);
640 nvc0_graph_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000);
641 }
642
643 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
644 nvc0_graph_zbc_clear_color(priv, index);
645 for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
646 nvc0_graph_zbc_clear_depth(priv, index);
647}
648
649void
410nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p) 650nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
411{ 651{
412 const struct nvc0_graph_pack *pack; 652 const struct nvc0_graph_pack *pack;
@@ -969,17 +1209,16 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
969{ 1209{
970 struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass; 1210 struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass;
971 struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass; 1211 struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
972 u32 r000260;
973 int i; 1212 int i;
974 1213
975 if (priv->firmware) { 1214 if (priv->firmware) {
976 /* load fuc microcode */ 1215 /* load fuc microcode */
977 r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000); 1216 nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
978 nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, 1217 nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c,
979 &priv->fuc409d); 1218 &priv->fuc409d);
980 nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, 1219 nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac,
981 &priv->fuc41ad); 1220 &priv->fuc41ad);
982 nv_wr32(priv, 0x000260, r000260); 1221 nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
983 1222
984 /* start both of them running */ 1223 /* start both of them running */
985 nv_wr32(priv, 0x409840, 0xffffffff); 1224 nv_wr32(priv, 0x409840, 0xffffffff);
@@ -1066,7 +1305,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
1066 } 1305 }
1067 1306
1068 /* load HUB microcode */ 1307 /* load HUB microcode */
1069 r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000); 1308 nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
1070 nv_wr32(priv, 0x4091c0, 0x01000000); 1309 nv_wr32(priv, 0x4091c0, 0x01000000);
1071 for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++) 1310 for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++)
1072 nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]); 1311 nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]);
@@ -1089,7 +1328,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
1089 nv_wr32(priv, 0x41a188, i >> 6); 1328 nv_wr32(priv, 0x41a188, i >> 6);
1090 nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]); 1329 nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]);
1091 } 1330 }
1092 nv_wr32(priv, 0x000260, r000260); 1331 nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
1093 1332
1094 /* load register lists */ 1333 /* load register lists */
1095 nvc0_graph_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000); 1334 nvc0_graph_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
@@ -1224,6 +1463,9 @@ nvc0_graph_init(struct nouveau_object *object)
1224 nv_wr32(priv, 0x400134, 0xffffffff); 1463 nv_wr32(priv, 0x400134, 0xffffffff);
1225 1464
1226 nv_wr32(priv, 0x400054, 0x34ce3464); 1465 nv_wr32(priv, 0x400054, 0x34ce3464);
1466
1467 nvc0_graph_zbc_init(priv);
1468
1227 return nvc0_graph_init_ctxctl(priv); 1469 return nvc0_graph_init_ctxctl(priv);
1228} 1470}
1229 1471
@@ -1287,7 +1529,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1287 struct nouveau_device *device = nv_device(parent); 1529 struct nouveau_device *device = nv_device(parent);
1288 struct nvc0_graph_priv *priv; 1530 struct nvc0_graph_priv *priv;
1289 bool use_ext_fw, enable; 1531 bool use_ext_fw, enable;
1290 int ret, i; 1532 int ret, i, j;
1291 1533
1292 use_ext_fw = nouveau_boolopt(device->cfgopt, "NvGrUseFW", 1534 use_ext_fw = nouveau_boolopt(device->cfgopt, "NvGrUseFW",
1293 oclass->fecs.ucode == NULL); 1535 oclass->fecs.ucode == NULL);
@@ -1333,6 +1575,11 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1333 for (i = 0; i < priv->gpc_nr; i++) { 1575 for (i = 0; i < priv->gpc_nr; i++) {
1334 priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608)); 1576 priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
1335 priv->tpc_total += priv->tpc_nr[i]; 1577 priv->tpc_total += priv->tpc_nr[i];
1578 priv->ppc_nr[i] = oclass->ppc_nr;
1579 for (j = 0; j < priv->ppc_nr[i]; j++) {
1580 u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4)));
1581 priv->ppc_tpc_nr[i][j] = hweight8(mask);
1582 }
1336 } 1583 }
1337 1584
1338 /*XXX: these need figuring out... though it might not even matter */ 1585 /*XXX: these need figuring out... though it might not even matter */
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index ffc289198dd8..7ed9e89c3435 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -30,10 +30,15 @@
30#include <core/gpuobj.h> 30#include <core/gpuobj.h>
31#include <core/option.h> 31#include <core/option.h>
32 32
33#include <nvif/unpack.h>
34#include <nvif/class.h>
35
33#include <subdev/fb.h> 36#include <subdev/fb.h>
34#include <subdev/vm.h> 37#include <subdev/vm.h>
35#include <subdev/bar.h> 38#include <subdev/bar.h>
36#include <subdev/timer.h> 39#include <subdev/timer.h>
40#include <subdev/mc.h>
41#include <subdev/ltc.h>
37 42
38#include <engine/fifo.h> 43#include <engine/fifo.h>
39#include <engine/graph.h> 44#include <engine/graph.h>
@@ -60,7 +65,7 @@ struct nvc0_graph_mmio {
60 u32 addr; 65 u32 addr;
61 u32 data; 66 u32 data;
62 u32 shift; 67 u32 shift;
63 u32 buffer; 68 int buffer;
64}; 69};
65 70
66struct nvc0_graph_fuc { 71struct nvc0_graph_fuc {
@@ -68,6 +73,18 @@ struct nvc0_graph_fuc {
68 u32 size; 73 u32 size;
69}; 74};
70 75
76struct nvc0_graph_zbc_color {
77 u32 format;
78 u32 ds[4];
79 u32 l2[4];
80};
81
82struct nvc0_graph_zbc_depth {
83 u32 format;
84 u32 ds;
85 u32 l2;
86};
87
71struct nvc0_graph_priv { 88struct nvc0_graph_priv {
72 struct nouveau_graph base; 89 struct nouveau_graph base;
73 90
@@ -77,10 +94,15 @@ struct nvc0_graph_priv {
77 struct nvc0_graph_fuc fuc41ad; 94 struct nvc0_graph_fuc fuc41ad;
78 bool firmware; 95 bool firmware;
79 96
97 struct nvc0_graph_zbc_color zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT];
98 struct nvc0_graph_zbc_depth zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT];
99
80 u8 rop_nr; 100 u8 rop_nr;
81 u8 gpc_nr; 101 u8 gpc_nr;
82 u8 tpc_nr[GPC_MAX]; 102 u8 tpc_nr[GPC_MAX];
83 u8 tpc_total; 103 u8 tpc_total;
104 u8 ppc_nr[GPC_MAX];
105 u8 ppc_tpc_nr[GPC_MAX][4];
84 106
85 struct nouveau_gpuobj *unk4188b4; 107 struct nouveau_gpuobj *unk4188b4;
86 struct nouveau_gpuobj *unk4188b8; 108 struct nouveau_gpuobj *unk4188b8;
@@ -118,12 +140,20 @@ int nvc0_graph_ctor(struct nouveau_object *, struct nouveau_object *,
118 struct nouveau_object **); 140 struct nouveau_object **);
119void nvc0_graph_dtor(struct nouveau_object *); 141void nvc0_graph_dtor(struct nouveau_object *);
120int nvc0_graph_init(struct nouveau_object *); 142int nvc0_graph_init(struct nouveau_object *);
143void nvc0_graph_zbc_init(struct nvc0_graph_priv *);
144
121int nve4_graph_fini(struct nouveau_object *, bool); 145int nve4_graph_fini(struct nouveau_object *, bool);
122int nve4_graph_init(struct nouveau_object *); 146int nve4_graph_init(struct nouveau_object *);
123 147
124extern struct nouveau_oclass nvc0_graph_sclass[]; 148int nvf0_graph_fini(struct nouveau_object *, bool);
149
150extern struct nouveau_ofuncs nvc0_fermi_ofuncs;
125 151
152extern struct nouveau_oclass nvc0_graph_sclass[];
153extern struct nouveau_omthds nvc0_graph_9097_omthds[];
154extern struct nouveau_omthds nvc0_graph_90c0_omthds[];
126extern struct nouveau_oclass nvc8_graph_sclass[]; 155extern struct nouveau_oclass nvc8_graph_sclass[];
156extern struct nouveau_oclass nvf0_graph_sclass[];
127 157
128struct nvc0_graph_init { 158struct nvc0_graph_init {
129 u32 addr; 159 u32 addr;
@@ -149,6 +179,9 @@ struct nvc0_graph_ucode {
149extern struct nvc0_graph_ucode nvc0_graph_fecs_ucode; 179extern struct nvc0_graph_ucode nvc0_graph_fecs_ucode;
150extern struct nvc0_graph_ucode nvc0_graph_gpccs_ucode; 180extern struct nvc0_graph_ucode nvc0_graph_gpccs_ucode;
151 181
182extern struct nvc0_graph_ucode nvf0_graph_fecs_ucode;
183extern struct nvc0_graph_ucode nvf0_graph_gpccs_ucode;
184
152struct nvc0_graph_oclass { 185struct nvc0_graph_oclass {
153 struct nouveau_oclass base; 186 struct nouveau_oclass base;
154 struct nouveau_oclass **cclass; 187 struct nouveau_oclass **cclass;
@@ -160,6 +193,7 @@ struct nvc0_graph_oclass {
160 struct { 193 struct {
161 struct nvc0_graph_ucode *ucode; 194 struct nvc0_graph_ucode *ucode;
162 } gpccs; 195 } gpccs;
196 int ppc_nr;
163}; 197};
164 198
165void nvc0_graph_mmio(struct nvc0_graph_priv *, const struct nvc0_graph_pack *); 199void nvc0_graph_mmio(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
@@ -223,9 +257,11 @@ extern const struct nvc0_graph_init nve4_graph_init_be_0[];
223extern const struct nvc0_graph_pack nve4_graph_pack_mmio[]; 257extern const struct nvc0_graph_pack nve4_graph_pack_mmio[];
224 258
225extern const struct nvc0_graph_init nvf0_graph_init_fe_0[]; 259extern const struct nvc0_graph_init nvf0_graph_init_fe_0[];
260extern const struct nvc0_graph_init nvf0_graph_init_ds_0[];
226extern const struct nvc0_graph_init nvf0_graph_init_sked_0[]; 261extern const struct nvc0_graph_init nvf0_graph_init_sked_0[];
227extern const struct nvc0_graph_init nvf0_graph_init_cwd_0[]; 262extern const struct nvc0_graph_init nvf0_graph_init_cwd_0[];
228extern const struct nvc0_graph_init nvf0_graph_init_gpc_unk_1[]; 263extern const struct nvc0_graph_init nvf0_graph_init_gpc_unk_1[];
264extern const struct nvc0_graph_init nvf0_graph_init_tex_0[];
229extern const struct nvc0_graph_init nvf0_graph_init_sm_0[]; 265extern const struct nvc0_graph_init nvf0_graph_init_sm_0[];
230 266
231extern const struct nvc0_graph_init nv108_graph_init_gpc_unk_0[]; 267extern const struct nvc0_graph_init nv108_graph_init_gpc_unk_0[];
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
index 30cab0b2eba1..93d58e5b82c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
@@ -33,9 +33,9 @@ static struct nouveau_oclass
33nvc1_graph_sclass[] = { 33nvc1_graph_sclass[] = {
34 { 0x902d, &nouveau_object_ofuncs }, 34 { 0x902d, &nouveau_object_ofuncs },
35 { 0x9039, &nouveau_object_ofuncs }, 35 { 0x9039, &nouveau_object_ofuncs },
36 { 0x9097, &nouveau_object_ofuncs }, 36 { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
37 { 0x90c0, &nouveau_object_ofuncs }, 37 { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
38 { 0x9197, &nouveau_object_ofuncs }, 38 { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
39 {} 39 {}
40}; 40};
41 41
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
index a6bf783e1256..692e1eda0eb4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
@@ -33,10 +33,10 @@ struct nouveau_oclass
33nvc8_graph_sclass[] = { 33nvc8_graph_sclass[] = {
34 { 0x902d, &nouveau_object_ofuncs }, 34 { 0x902d, &nouveau_object_ofuncs },
35 { 0x9039, &nouveau_object_ofuncs }, 35 { 0x9039, &nouveau_object_ofuncs },
36 { 0x9097, &nouveau_object_ofuncs }, 36 { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
37 { 0x90c0, &nouveau_object_ofuncs }, 37 { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
38 { 0x9197, &nouveau_object_ofuncs }, 38 { FERMI_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
39 { 0x9297, &nouveau_object_ofuncs }, 39 { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
40 {} 40 {}
41}; 41};
42 42
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
index 2a6a94e2a041..41e8445c7eea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
@@ -133,4 +133,5 @@ nvd7_graph_oclass = &(struct nvc0_graph_oclass) {
133 .mmio = nvd7_graph_pack_mmio, 133 .mmio = nvd7_graph_pack_mmio,
134 .fecs.ucode = &nvd7_graph_fecs_ucode, 134 .fecs.ucode = &nvd7_graph_fecs_ucode,
135 .gpccs.ucode = &nvd7_graph_gpccs_ucode, 135 .gpccs.ucode = &nvd7_graph_gpccs_ucode,
136 .ppc_nr = 1,
136}.base; 137}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
index 51e0c075ad34..0c71f5c67ae0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
@@ -22,6 +22,8 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include <subdev/pwr.h>
26
25#include "nvc0.h" 27#include "nvc0.h"
26#include "ctxnvc0.h" 28#include "ctxnvc0.h"
27 29
@@ -33,8 +35,8 @@ static struct nouveau_oclass
33nve4_graph_sclass[] = { 35nve4_graph_sclass[] = {
34 { 0x902d, &nouveau_object_ofuncs }, 36 { 0x902d, &nouveau_object_ofuncs },
35 { 0xa040, &nouveau_object_ofuncs }, 37 { 0xa040, &nouveau_object_ofuncs },
36 { 0xa097, &nouveau_object_ofuncs }, 38 { KEPLER_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
37 { 0xa0c0, &nouveau_object_ofuncs }, 39 { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
38 {} 40 {}
39}; 41};
40 42
@@ -190,39 +192,20 @@ nve4_graph_pack_mmio[] = {
190 ******************************************************************************/ 192 ******************************************************************************/
191 193
192int 194int
193nve4_graph_fini(struct nouveau_object *object, bool suspend)
194{
195 struct nvc0_graph_priv *priv = (void *)object;
196
197 /*XXX: this is a nasty hack to power on gr on certain boards
198 * where it's disabled by therm, somehow. ideally it'd
199 * be nice to know when we should be doing this, and why,
200 * but, it's yet to be determined. for now we test for
201 * the particular mmio error that occurs in the situation,
202 * and then bash therm in the way nvidia do.
203 */
204 nv_mask(priv, 0x000200, 0x08001000, 0x08001000);
205 nv_rd32(priv, 0x000200);
206 if (nv_rd32(priv, 0x400700) == 0xbadf1000) {
207 nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
208 nv_rd32(priv, 0x000200);
209 nv_mask(priv, 0x020004, 0xc0000000, 0x40000000);
210 }
211
212 return nouveau_graph_fini(&priv->base, suspend);
213}
214
215int
216nve4_graph_init(struct nouveau_object *object) 195nve4_graph_init(struct nouveau_object *object)
217{ 196{
218 struct nvc0_graph_oclass *oclass = (void *)object->oclass; 197 struct nvc0_graph_oclass *oclass = (void *)object->oclass;
219 struct nvc0_graph_priv *priv = (void *)object; 198 struct nvc0_graph_priv *priv = (void *)object;
199 struct nouveau_pwr *ppwr = nouveau_pwr(priv);
220 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total); 200 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
221 u32 data[TPC_MAX / 8] = {}; 201 u32 data[TPC_MAX / 8] = {};
222 u8 tpcnr[GPC_MAX]; 202 u8 tpcnr[GPC_MAX];
223 int gpc, tpc, rop; 203 int gpc, tpc, rop;
224 int ret, i; 204 int ret, i;
225 205
206 if (ppwr)
207 ppwr->pgob(ppwr, false);
208
226 ret = nouveau_graph_init(&priv->base); 209 ret = nouveau_graph_init(&priv->base);
227 if (ret) 210 if (ret)
228 return ret; 211 return ret;
@@ -320,6 +303,9 @@ nve4_graph_init(struct nouveau_object *object)
320 nv_wr32(priv, 0x400134, 0xffffffff); 303 nv_wr32(priv, 0x400134, 0xffffffff);
321 304
322 nv_wr32(priv, 0x400054, 0x34ce3464); 305 nv_wr32(priv, 0x400054, 0x34ce3464);
306
307 nvc0_graph_zbc_init(priv);
308
323 return nvc0_graph_init_ctxctl(priv); 309 return nvc0_graph_init_ctxctl(priv);
324} 310}
325 311
@@ -350,11 +336,12 @@ nve4_graph_oclass = &(struct nvc0_graph_oclass) {
350 .ctor = nvc0_graph_ctor, 336 .ctor = nvc0_graph_ctor,
351 .dtor = nvc0_graph_dtor, 337 .dtor = nvc0_graph_dtor,
352 .init = nve4_graph_init, 338 .init = nve4_graph_init,
353 .fini = nve4_graph_fini, 339 .fini = _nouveau_graph_fini,
354 }, 340 },
355 .cclass = &nve4_grctx_oclass, 341 .cclass = &nve4_grctx_oclass,
356 .sclass = nve4_graph_sclass, 342 .sclass = nve4_graph_sclass,
357 .mmio = nve4_graph_pack_mmio, 343 .mmio = nve4_graph_pack_mmio,
358 .fecs.ucode = &nve4_graph_fecs_ucode, 344 .fecs.ucode = &nve4_graph_fecs_ucode,
359 .gpccs.ucode = &nve4_graph_gpccs_ucode, 345 .gpccs.ucode = &nve4_graph_gpccs_ucode,
346 .ppc_nr = 1,
360}.base; 347}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
index c96762122b9b..c306c0f2fc84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
@@ -29,12 +29,12 @@
29 * Graphics object classes 29 * Graphics object classes
30 ******************************************************************************/ 30 ******************************************************************************/
31 31
32static struct nouveau_oclass 32struct nouveau_oclass
33nvf0_graph_sclass[] = { 33nvf0_graph_sclass[] = {
34 { 0x902d, &nouveau_object_ofuncs }, 34 { 0x902d, &nouveau_object_ofuncs },
35 { 0xa140, &nouveau_object_ofuncs }, 35 { 0xa140, &nouveau_object_ofuncs },
36 { 0xa197, &nouveau_object_ofuncs }, 36 { KEPLER_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
37 { 0xa1c0, &nouveau_object_ofuncs }, 37 { KEPLER_COMPUTE_B, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
38 {} 38 {}
39}; 39};
40 40
@@ -50,7 +50,7 @@ nvf0_graph_init_fe_0[] = {
50 {} 50 {}
51}; 51};
52 52
53static const struct nvc0_graph_init 53const struct nvc0_graph_init
54nvf0_graph_init_ds_0[] = { 54nvf0_graph_init_ds_0[] = {
55 { 0x405844, 1, 0x04, 0x00ffffff }, 55 { 0x405844, 1, 0x04, 0x00ffffff },
56 { 0x405850, 1, 0x04, 0x00000000 }, 56 { 0x405850, 1, 0x04, 0x00000000 },
@@ -88,7 +88,7 @@ nvf0_graph_init_gpc_unk_1[] = {
88 {} 88 {}
89}; 89};
90 90
91static const struct nvc0_graph_init 91const struct nvc0_graph_init
92nvf0_graph_init_tex_0[] = { 92nvf0_graph_init_tex_0[] = {
93 { 0x419ab0, 1, 0x04, 0x00000000 }, 93 { 0x419ab0, 1, 0x04, 0x00000000 },
94 { 0x419ac8, 1, 0x04, 0x00000000 }, 94 { 0x419ac8, 1, 0x04, 0x00000000 },
@@ -170,7 +170,7 @@ nvf0_graph_pack_mmio[] = {
170 * PGRAPH engine/subdev functions 170 * PGRAPH engine/subdev functions
171 ******************************************************************************/ 171 ******************************************************************************/
172 172
173static int 173int
174nvf0_graph_fini(struct nouveau_object *object, bool suspend) 174nvf0_graph_fini(struct nouveau_object *object, bool suspend)
175{ 175{
176 struct nvc0_graph_priv *priv = (void *)object; 176 struct nvc0_graph_priv *priv = (void *)object;
@@ -209,7 +209,7 @@ nvf0_graph_fini(struct nouveau_object *object, bool suspend)
209 209
210#include "fuc/hubnvf0.fuc.h" 210#include "fuc/hubnvf0.fuc.h"
211 211
212static struct nvc0_graph_ucode 212struct nvc0_graph_ucode
213nvf0_graph_fecs_ucode = { 213nvf0_graph_fecs_ucode = {
214 .code.data = nvf0_grhub_code, 214 .code.data = nvf0_grhub_code,
215 .code.size = sizeof(nvf0_grhub_code), 215 .code.size = sizeof(nvf0_grhub_code),
@@ -219,7 +219,7 @@ nvf0_graph_fecs_ucode = {
219 219
220#include "fuc/gpcnvf0.fuc.h" 220#include "fuc/gpcnvf0.fuc.h"
221 221
222static struct nvc0_graph_ucode 222struct nvc0_graph_ucode
223nvf0_graph_gpccs_ucode = { 223nvf0_graph_gpccs_ucode = {
224 .code.data = nvf0_grgpc_code, 224 .code.data = nvf0_grgpc_code,
225 .code.size = sizeof(nvf0_grgpc_code), 225 .code.size = sizeof(nvf0_grgpc_code),
@@ -241,4 +241,5 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) {
241 .mmio = nvf0_graph_pack_mmio, 241 .mmio = nvf0_graph_pack_mmio,
242 .fecs.ucode = &nvf0_graph_fecs_ucode, 242 .fecs.ucode = &nvf0_graph_fecs_ucode,
243 .gpccs.ucode = &nvf0_graph_gpccs_ucode, 243 .gpccs.ucode = &nvf0_graph_gpccs_ucode,
244 .ppc_nr = 2,
244}.base; 245}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 7eb6d94c84e2..d88c700b2f69 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -24,7 +24,6 @@
24 24
25#include <core/client.h> 25#include <core/client.h>
26#include <core/os.h> 26#include <core/os.h>
27#include <core/class.h>
28#include <core/engctx.h> 27#include <core/engctx.h>
29#include <core/handle.h> 28#include <core/handle.h>
30 29
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index d4e7ec0ba68c..bdb2f20ff7b1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 26#include <core/engctx.h>
28 27
29#include <subdev/fb.h> 28#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
index 3d8c2133e0e8..72c7f33fd29b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27#include <core/client.h> 26#include <core/client.h>
28#include <core/engctx.h> 27#include <core/engctx.h>
29#include <core/handle.h> 28#include <core/handle.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 37a2bd9e8078..cae33f86b11a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 26#include <core/engctx.h>
28 27
29#include <subdev/vm.h> 28#include <subdev/vm.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
index 96f5aa92677b..e9cc8b116a24 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 26#include <core/engctx.h>
28 27
29#include <subdev/vm.h> 28#include <subdev/vm.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
index e9c5e51943ef..63013812f7c9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
@@ -22,8 +22,11 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/client.h>
25#include <core/option.h> 26#include <core/option.h>
26#include <core/class.h> 27#include <nvif/unpack.h>
28#include <nvif/class.h>
29#include <nvif/ioctl.h>
27 30
28#include <subdev/clock.h> 31#include <subdev/clock.h>
29 32
@@ -101,24 +104,28 @@ nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name,
101 * Perfmon object classes 104 * Perfmon object classes
102 ******************************************************************************/ 105 ******************************************************************************/
103static int 106static int
104nouveau_perfctr_query(struct nouveau_object *object, u32 mthd, 107nouveau_perfctr_query(struct nouveau_object *object, void *data, u32 size)
105 void *data, u32 size)
106{ 108{
109 union {
110 struct nvif_perfctr_query_v0 v0;
111 } *args = data;
107 struct nouveau_device *device = nv_device(object); 112 struct nouveau_device *device = nv_device(object);
108 struct nouveau_perfmon *ppm = (void *)object->engine; 113 struct nouveau_perfmon *ppm = (void *)object->engine;
109 struct nouveau_perfdom *dom = NULL, *chk; 114 struct nouveau_perfdom *dom = NULL, *chk;
110 struct nv_perfctr_query *args = data;
111 const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false); 115 const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false);
112 const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all); 116 const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all);
113 const char *name; 117 const char *name;
114 int tmp = 0, di, si; 118 int tmp = 0, di, si;
115 char path[64]; 119 int ret;
116
117 if (size < sizeof(*args))
118 return -EINVAL;
119 120
120 di = (args->iter & 0xff000000) >> 24; 121 nv_ioctl(object, "perfctr query size %d\n", size);
121 si = (args->iter & 0x00ffffff) - 1; 122 if (nvif_unpack(args->v0, 0, 0, false)) {
123 nv_ioctl(object, "perfctr query vers %d iter %08x\n",
124 args->v0.version, args->v0.iter);
125 di = (args->v0.iter & 0xff000000) >> 24;
126 si = (args->v0.iter & 0x00ffffff) - 1;
127 } else
128 return ret;
122 129
123 list_for_each_entry(chk, &ppm->domains, head) { 130 list_for_each_entry(chk, &ppm->domains, head) {
124 if (tmp++ == di) { 131 if (tmp++ == di) {
@@ -132,19 +139,17 @@ nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
132 139
133 if (si >= 0) { 140 if (si >= 0) {
134 if (raw || !(name = dom->signal[si].name)) { 141 if (raw || !(name = dom->signal[si].name)) {
135 snprintf(path, sizeof(path), "/%s/%02x", dom->name, si); 142 snprintf(args->v0.name, sizeof(args->v0.name),
136 name = path; 143 "/%s/%02x", dom->name, si);
144 } else {
145 strncpy(args->v0.name, name, sizeof(args->v0.name));
137 } 146 }
138
139 if (args->name)
140 strncpy(args->name, name, args->size);
141 args->size = strlen(name) + 1;
142 } 147 }
143 148
144 do { 149 do {
145 while (++si < dom->signal_nr) { 150 while (++si < dom->signal_nr) {
146 if (all || dom->signal[si].name) { 151 if (all || dom->signal[si].name) {
147 args->iter = (di << 24) | ++si; 152 args->v0.iter = (di << 24) | ++si;
148 return 0; 153 return 0;
149 } 154 }
150 } 155 }
@@ -153,21 +158,26 @@ nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
153 dom = list_entry(dom->head.next, typeof(*dom), head); 158 dom = list_entry(dom->head.next, typeof(*dom), head);
154 } while (&dom->head != &ppm->domains); 159 } while (&dom->head != &ppm->domains);
155 160
156 args->iter = 0xffffffff; 161 args->v0.iter = 0xffffffff;
157 return 0; 162 return 0;
158} 163}
159 164
160static int 165static int
161nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd, 166nouveau_perfctr_sample(struct nouveau_object *object, void *data, u32 size)
162 void *data, u32 size)
163{ 167{
168 union {
169 struct nvif_perfctr_sample none;
170 } *args = data;
164 struct nouveau_perfmon *ppm = (void *)object->engine; 171 struct nouveau_perfmon *ppm = (void *)object->engine;
165 struct nouveau_perfctr *ctr, *tmp; 172 struct nouveau_perfctr *ctr, *tmp;
166 struct nouveau_perfdom *dom; 173 struct nouveau_perfdom *dom;
167 struct nv_perfctr_sample *args = data; 174 int ret;
168 175
169 if (size < sizeof(*args)) 176 nv_ioctl(object, "perfctr sample size %d\n", size);
170 return -EINVAL; 177 if (nvif_unvers(args->none)) {
178 nv_ioctl(object, "perfctr sample\n");
179 } else
180 return ret;
171 ppm->sequence++; 181 ppm->sequence++;
172 182
173 list_for_each_entry(dom, &ppm->domains, head) { 183 list_for_each_entry(dom, &ppm->domains, head) {
@@ -206,22 +216,45 @@ nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd,
206} 216}
207 217
208static int 218static int
209nouveau_perfctr_read(struct nouveau_object *object, u32 mthd, 219nouveau_perfctr_read(struct nouveau_object *object, void *data, u32 size)
210 void *data, u32 size)
211{ 220{
221 union {
222 struct nvif_perfctr_read_v0 v0;
223 } *args = data;
212 struct nouveau_perfctr *ctr = (void *)object; 224 struct nouveau_perfctr *ctr = (void *)object;
213 struct nv_perfctr_read *args = data; 225 int ret;
226
227 nv_ioctl(object, "perfctr read size %d\n", size);
228 if (nvif_unpack(args->v0, 0, 0, false)) {
229 nv_ioctl(object, "perfctr read vers %d\n", args->v0.version);
230 } else
231 return ret;
214 232
215 if (size < sizeof(*args))
216 return -EINVAL;
217 if (!ctr->clk) 233 if (!ctr->clk)
218 return -EAGAIN; 234 return -EAGAIN;
219 235
220 args->clk = ctr->clk; 236 args->v0.clk = ctr->clk;
221 args->ctr = ctr->ctr; 237 args->v0.ctr = ctr->ctr;
222 return 0; 238 return 0;
223} 239}
224 240
241static int
242nouveau_perfctr_mthd(struct nouveau_object *object, u32 mthd,
243 void *data, u32 size)
244{
245 switch (mthd) {
246 case NVIF_PERFCTR_V0_QUERY:
247 return nouveau_perfctr_query(object, data, size);
248 case NVIF_PERFCTR_V0_SAMPLE:
249 return nouveau_perfctr_sample(object, data, size);
250 case NVIF_PERFCTR_V0_READ:
251 return nouveau_perfctr_read(object, data, size);
252 default:
253 break;
254 }
255 return -EINVAL;
256}
257
225static void 258static void
226nouveau_perfctr_dtor(struct nouveau_object *object) 259nouveau_perfctr_dtor(struct nouveau_object *object)
227{ 260{
@@ -237,19 +270,27 @@ nouveau_perfctr_ctor(struct nouveau_object *parent,
237 struct nouveau_oclass *oclass, void *data, u32 size, 270 struct nouveau_oclass *oclass, void *data, u32 size,
238 struct nouveau_object **pobject) 271 struct nouveau_object **pobject)
239{ 272{
273 union {
274 struct nvif_perfctr_v0 v0;
275 } *args = data;
240 struct nouveau_perfmon *ppm = (void *)engine; 276 struct nouveau_perfmon *ppm = (void *)engine;
241 struct nouveau_perfdom *dom = NULL; 277 struct nouveau_perfdom *dom = NULL;
242 struct nouveau_perfsig *sig[4] = {}; 278 struct nouveau_perfsig *sig[4] = {};
243 struct nouveau_perfctr *ctr; 279 struct nouveau_perfctr *ctr;
244 struct nv_perfctr_class *args = data;
245 int ret, i; 280 int ret, i;
246 281
247 if (size < sizeof(*args)) 282 nv_ioctl(parent, "create perfctr size %d\n", size);
248 return -EINVAL; 283 if (nvif_unpack(args->v0, 0, 0, false)) {
284 nv_ioctl(parent, "create perfctr vers %d logic_op %04x\n",
285 args->v0.version, args->v0.logic_op);
286 } else
287 return ret;
249 288
250 for (i = 0; i < ARRAY_SIZE(args->signal) && args->signal[i].name; i++) { 289 for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) {
251 sig[i] = nouveau_perfsig_find(ppm, args->signal[i].name, 290 sig[i] = nouveau_perfsig_find(ppm, args->v0.name[i],
252 args->signal[i].size, &dom); 291 strnlen(args->v0.name[i],
292 sizeof(args->v0.name[i])),
293 &dom);
253 if (!sig[i]) 294 if (!sig[i])
254 return -EINVAL; 295 return -EINVAL;
255 } 296 }
@@ -260,7 +301,7 @@ nouveau_perfctr_ctor(struct nouveau_object *parent,
260 return ret; 301 return ret;
261 302
262 ctr->slot = -1; 303 ctr->slot = -1;
263 ctr->logic_op = args->logic_op; 304 ctr->logic_op = args->v0.logic_op;
264 ctr->signal[0] = sig[0]; 305 ctr->signal[0] = sig[0];
265 ctr->signal[1] = sig[1]; 306 ctr->signal[1] = sig[1];
266 ctr->signal[2] = sig[2]; 307 ctr->signal[2] = sig[2];
@@ -276,21 +317,13 @@ nouveau_perfctr_ofuncs = {
276 .dtor = nouveau_perfctr_dtor, 317 .dtor = nouveau_perfctr_dtor,
277 .init = nouveau_object_init, 318 .init = nouveau_object_init,
278 .fini = nouveau_object_fini, 319 .fini = nouveau_object_fini,
279}; 320 .mthd = nouveau_perfctr_mthd,
280
281static struct nouveau_omthds
282nouveau_perfctr_omthds[] = {
283 { NV_PERFCTR_QUERY, NV_PERFCTR_QUERY, nouveau_perfctr_query },
284 { NV_PERFCTR_SAMPLE, NV_PERFCTR_SAMPLE, nouveau_perfctr_sample },
285 { NV_PERFCTR_READ, NV_PERFCTR_READ, nouveau_perfctr_read },
286 {}
287}; 321};
288 322
289struct nouveau_oclass 323struct nouveau_oclass
290nouveau_perfmon_sclass[] = { 324nouveau_perfmon_sclass[] = {
291 { .handle = NV_PERFCTR_CLASS, 325 { .handle = NVIF_IOCTL_NEW_V0_PERFCTR,
292 .ofuncs = &nouveau_perfctr_ofuncs, 326 .ofuncs = &nouveau_perfctr_ofuncs,
293 .omthds = nouveau_perfctr_omthds,
294 }, 327 },
295 {}, 328 {},
296}; 329};
@@ -303,6 +336,7 @@ nouveau_perfctx_dtor(struct nouveau_object *object)
303{ 336{
304 struct nouveau_perfmon *ppm = (void *)object->engine; 337 struct nouveau_perfmon *ppm = (void *)object->engine;
305 mutex_lock(&nv_subdev(ppm)->mutex); 338 mutex_lock(&nv_subdev(ppm)->mutex);
339 nouveau_engctx_destroy(&ppm->context->base);
306 ppm->context = NULL; 340 ppm->context = NULL;
307 mutex_unlock(&nv_subdev(ppm)->mutex); 341 mutex_unlock(&nv_subdev(ppm)->mutex);
308} 342}
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index c571758e4a27..64df15c7f051 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 26#include <core/engctx.h>
28 27
29#include <engine/software.h> 28#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index a62f11a78430..f54a2253deca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 26#include <core/engctx.h>
28 27
29#include <engine/software.h> 28#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index f3b4d9dbf23c..4d2994d8cc32 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -23,12 +23,12 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 26#include <core/engctx.h>
28#include <core/namedb.h> 27#include <core/namedb.h>
29#include <core/handle.h> 28#include <core/handle.h>
30#include <core/gpuobj.h> 29#include <core/gpuobj.h>
31#include <core/event.h> 30#include <core/event.h>
31#include <nvif/event.h>
32 32
33#include <subdev/bar.h> 33#include <subdev/bar.h>
34 34
@@ -86,10 +86,10 @@ nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
86{ 86{
87 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); 87 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
88 u32 head = *(u32 *)args; 88 u32 head = *(u32 *)args;
89 if (head >= chan->vblank.nr_event) 89 if (head >= nouveau_disp(chan)->vblank.index_nr)
90 return -EINVAL; 90 return -EINVAL;
91 91
92 nouveau_event_get(chan->vblank.event[head]); 92 nvkm_notify_get(&chan->vblank.notify[head]);
93 return 0; 93 return 0;
94} 94}
95 95
@@ -124,9 +124,10 @@ nv50_software_sclass[] = {
124 ******************************************************************************/ 124 ******************************************************************************/
125 125
126static int 126static int
127nv50_software_vblsem_release(void *data, u32 type, int head) 127nv50_software_vblsem_release(struct nvkm_notify *notify)
128{ 128{
129 struct nv50_software_chan *chan = data; 129 struct nv50_software_chan *chan =
130 container_of(notify, typeof(*chan), vblank.notify[notify->index]);
130 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine; 131 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
131 struct nouveau_bar *bar = nouveau_bar(priv); 132 struct nouveau_bar *bar = nouveau_bar(priv);
132 133
@@ -142,7 +143,7 @@ nv50_software_vblsem_release(void *data, u32 type, int head)
142 nv_wr32(priv, 0x060014, chan->vblank.value); 143 nv_wr32(priv, 0x060014, chan->vblank.value);
143 } 144 }
144 145
145 return NVKM_EVENT_DROP; 146 return NVKM_NOTIFY_DROP;
146} 147}
147 148
148void 149void
@@ -151,11 +152,8 @@ nv50_software_context_dtor(struct nouveau_object *object)
151 struct nv50_software_chan *chan = (void *)object; 152 struct nv50_software_chan *chan = (void *)object;
152 int i; 153 int i;
153 154
154 if (chan->vblank.event) { 155 for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
155 for (i = 0; i < chan->vblank.nr_event; i++) 156 nvkm_notify_fini(&chan->vblank.notify[i]);
156 nouveau_event_ref(NULL, &chan->vblank.event[i]);
157 kfree(chan->vblank.event);
158 }
159 157
160 nouveau_software_context_destroy(&chan->base); 158 nouveau_software_context_destroy(&chan->base);
161} 159}
@@ -176,15 +174,14 @@ nv50_software_context_ctor(struct nouveau_object *parent,
176 if (ret) 174 if (ret)
177 return ret; 175 return ret;
178 176
179 chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0; 177 for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) {
180 chan->vblank.event = kzalloc(chan->vblank.nr_event * 178 ret = nvkm_notify_init(&pdisp->vblank, pclass->vblank, false,
181 sizeof(*chan->vblank.event), GFP_KERNEL); 179 &(struct nvif_notify_head_req_v0) {
182 if (!chan->vblank.event) 180 .head = i,
183 return -ENOMEM; 181 },
184 182 sizeof(struct nvif_notify_head_req_v0),
185 for (i = 0; i < chan->vblank.nr_event; i++) { 183 sizeof(struct nvif_notify_head_rep_v0),
186 ret = nouveau_event_new(pdisp->vblank, 1, i, pclass->vblank, 184 &chan->vblank.notify[i]);
187 chan, &chan->vblank.event[i]);
188 if (ret) 185 if (ret)
189 return ret; 186 return ret;
190 } 187 }
@@ -198,7 +195,7 @@ nv50_software_cclass = {
198 .base.handle = NV_ENGCTX(SW, 0x50), 195 .base.handle = NV_ENGCTX(SW, 0x50),
199 .base.ofuncs = &(struct nouveau_ofuncs) { 196 .base.ofuncs = &(struct nouveau_ofuncs) {
200 .ctor = nv50_software_context_ctor, 197 .ctor = nv50_software_context_ctor,
201 .dtor = _nouveau_software_context_dtor, 198 .dtor = nv50_software_context_dtor,
202 .init = _nouveau_software_context_init, 199 .init = _nouveau_software_context_init,
203 .fini = _nouveau_software_context_fini, 200 .fini = _nouveau_software_context_fini,
204 }, 201 },
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
index bb49a7a20857..41542e725b4b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
@@ -19,14 +19,13 @@ int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
19 19
20struct nv50_software_cclass { 20struct nv50_software_cclass {
21 struct nouveau_oclass base; 21 struct nouveau_oclass base;
22 int (*vblank)(void *, u32, int); 22 int (*vblank)(struct nvkm_notify *);
23}; 23};
24 24
25struct nv50_software_chan { 25struct nv50_software_chan {
26 struct nouveau_software_chan base; 26 struct nouveau_software_chan base;
27 struct { 27 struct {
28 struct nouveau_eventh **event; 28 struct nvkm_notify notify[4];
29 int nr_event;
30 u32 channel; 29 u32 channel;
31 u32 ctxdma; 30 u32 ctxdma;
32 u64 offset; 31 u64 offset;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index 135c20f38356..6af370d3a06d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -23,7 +23,6 @@
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 26#include <core/engctx.h>
28#include <core/event.h> 27#include <core/event.h>
29 28
@@ -104,9 +103,10 @@ nvc0_software_sclass[] = {
104 ******************************************************************************/ 103 ******************************************************************************/
105 104
106static int 105static int
107nvc0_software_vblsem_release(void *data, u32 type, int head) 106nvc0_software_vblsem_release(struct nvkm_notify *notify)
108{ 107{
109 struct nv50_software_chan *chan = data; 108 struct nv50_software_chan *chan =
109 container_of(notify, typeof(*chan), vblank.notify[notify->index]);
110 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine; 110 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
111 struct nouveau_bar *bar = nouveau_bar(priv); 111 struct nouveau_bar *bar = nouveau_bar(priv);
112 112
@@ -116,7 +116,7 @@ nvc0_software_vblsem_release(void *data, u32 type, int head)
116 nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset)); 116 nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
117 nv_wr32(priv, 0x060014, chan->vblank.value); 117 nv_wr32(priv, 0x060014, chan->vblank.value);
118 118
119 return NVKM_EVENT_DROP; 119 return NVKM_NOTIFY_DROP;
120} 120}
121 121
122static struct nv50_software_cclass 122static struct nv50_software_cclass
@@ -124,7 +124,7 @@ nvc0_software_cclass = {
124 .base.handle = NV_ENGCTX(SW, 0xc0), 124 .base.handle = NV_ENGCTX(SW, 0xc0),
125 .base.ofuncs = &(struct nouveau_ofuncs) { 125 .base.ofuncs = &(struct nouveau_ofuncs) {
126 .ctor = nv50_software_context_ctor, 126 .ctor = nv50_software_context_ctor,
127 .dtor = _nouveau_software_context_dtor, 127 .dtor = nv50_software_context_dtor,
128 .init = _nouveau_software_context_init, 128 .init = _nouveau_software_context_init,
129 .fini = _nouveau_software_context_fini, 129 .fini = _nouveau_software_context_fini,
130 }, 130 },
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
deleted file mode 100644
index e0c812bc884f..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ /dev/null
@@ -1,470 +0,0 @@
1#ifndef __NOUVEAU_CLASS_H__
2#define __NOUVEAU_CLASS_H__
3
4/* Device class
5 *
6 * 0080: NV_DEVICE
7 */
8#define NV_DEVICE_CLASS 0x00000080
9
10#define NV_DEVICE_DISABLE_IDENTIFY 0x0000000000000001ULL
11#define NV_DEVICE_DISABLE_MMIO 0x0000000000000002ULL
12#define NV_DEVICE_DISABLE_VBIOS 0x0000000000000004ULL
13#define NV_DEVICE_DISABLE_CORE 0x0000000000000008ULL
14#define NV_DEVICE_DISABLE_DISP 0x0000000000010000ULL
15#define NV_DEVICE_DISABLE_FIFO 0x0000000000020000ULL
16#define NV_DEVICE_DISABLE_GRAPH 0x0000000100000000ULL
17#define NV_DEVICE_DISABLE_MPEG 0x0000000200000000ULL
18#define NV_DEVICE_DISABLE_ME 0x0000000400000000ULL
19#define NV_DEVICE_DISABLE_VP 0x0000000800000000ULL
20#define NV_DEVICE_DISABLE_CRYPT 0x0000001000000000ULL
21#define NV_DEVICE_DISABLE_BSP 0x0000002000000000ULL
22#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL
23#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
24#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
25#define NV_DEVICE_DISABLE_VIC 0x0000020000000000ULL
26#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
27
28struct nv_device_class {
29 u64 device; /* device identifier, ~0 for client default */
30 u64 disable; /* disable particular subsystems */
31 u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
32};
33
34/* DMA object classes
35 *
36 * 0002: NV_DMA_FROM_MEMORY
37 * 0003: NV_DMA_TO_MEMORY
38 * 003d: NV_DMA_IN_MEMORY
39 */
40#define NV_DMA_FROM_MEMORY_CLASS 0x00000002
41#define NV_DMA_TO_MEMORY_CLASS 0x00000003
42#define NV_DMA_IN_MEMORY_CLASS 0x0000003d
43
44#define NV_DMA_TARGET_MASK 0x000000ff
45#define NV_DMA_TARGET_VM 0x00000000
46#define NV_DMA_TARGET_VRAM 0x00000001
47#define NV_DMA_TARGET_PCI 0x00000002
48#define NV_DMA_TARGET_PCI_US 0x00000003
49#define NV_DMA_TARGET_AGP 0x00000004
50#define NV_DMA_ACCESS_MASK 0x00000f00
51#define NV_DMA_ACCESS_VM 0x00000000
52#define NV_DMA_ACCESS_RD 0x00000100
53#define NV_DMA_ACCESS_WR 0x00000200
54#define NV_DMA_ACCESS_RDWR 0x00000300
55
56/* NV50:NVC0 */
57#define NV50_DMA_CONF0_ENABLE 0x80000000
58#define NV50_DMA_CONF0_PRIV 0x00300000
59#define NV50_DMA_CONF0_PRIV_VM 0x00000000
60#define NV50_DMA_CONF0_PRIV_US 0x00100000
61#define NV50_DMA_CONF0_PRIV__S 0x00200000
62#define NV50_DMA_CONF0_PART 0x00030000
63#define NV50_DMA_CONF0_PART_VM 0x00000000
64#define NV50_DMA_CONF0_PART_256 0x00010000
65#define NV50_DMA_CONF0_PART_1KB 0x00020000
66#define NV50_DMA_CONF0_COMP 0x00000180
67#define NV50_DMA_CONF0_COMP_NONE 0x00000000
68#define NV50_DMA_CONF0_COMP_VM 0x00000180
69#define NV50_DMA_CONF0_TYPE 0x0000007f
70#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
71#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
72
73/* NVC0:NVD9 */
74#define NVC0_DMA_CONF0_ENABLE 0x80000000
75#define NVC0_DMA_CONF0_PRIV 0x00300000
76#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
77#define NVC0_DMA_CONF0_PRIV_US 0x00100000
78#define NVC0_DMA_CONF0_PRIV__S 0x00200000
79#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
80#define NVC0_DMA_CONF0_TYPE 0x000000ff
81#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
82#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
83
84/* NVD9- */
85#define NVD0_DMA_CONF0_ENABLE 0x80000000
86#define NVD0_DMA_CONF0_PAGE 0x00000400
87#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
88#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
89#define NVD0_DMA_CONF0_TYPE 0x000000ff
90#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
91#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
92
93struct nv_dma_class {
94 u32 flags;
95 u32 pad0;
96 u64 start;
97 u64 limit;
98 u32 conf0;
99};
100
101/* Perfmon counter class
102 *
103 * XXXX: NV_PERFCTR
104 */
105#define NV_PERFCTR_CLASS 0x0000ffff
106#define NV_PERFCTR_QUERY 0x00000000
107#define NV_PERFCTR_SAMPLE 0x00000001
108#define NV_PERFCTR_READ 0x00000002
109
110struct nv_perfctr_class {
111 u16 logic_op;
112 struct {
113 char __user *name; /*XXX: use cfu when exposed to userspace */
114 u32 size;
115 } signal[4];
116};
117
118struct nv_perfctr_query {
119 u32 iter;
120 u32 size;
121 char __user *name; /*XXX: use ctu when exposed to userspace */
122};
123
124struct nv_perfctr_sample {
125};
126
127struct nv_perfctr_read {
128 u32 ctr;
129 u32 clk;
130};
131
132/* Device control class
133 *
134 * XXXX: NV_CONTROL
135 */
136#define NV_CONTROL_CLASS 0x0000fffe
137
138#define NV_CONTROL_PSTATE_INFO 0x00000000
139#define NV_CONTROL_PSTATE_INFO_USTATE_DISABLE (-1)
140#define NV_CONTROL_PSTATE_INFO_USTATE_PERFMON (-2)
141#define NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN (-1)
142#define NV_CONTROL_PSTATE_INFO_PSTATE_PERFMON (-2)
143#define NV_CONTROL_PSTATE_ATTR 0x00000001
144#define NV_CONTROL_PSTATE_ATTR_STATE_CURRENT (-1)
145#define NV_CONTROL_PSTATE_USER 0x00000002
146#define NV_CONTROL_PSTATE_USER_STATE_UNKNOWN (-1)
147#define NV_CONTROL_PSTATE_USER_STATE_PERFMON (-2)
148
149struct nv_control_pstate_info {
150 u32 count; /* out: number of power states */
151 s32 ustate; /* out: current target pstate index */
152 u32 pstate; /* out: current pstate index */
153};
154
155struct nv_control_pstate_attr {
156 s32 state; /* in: index of pstate to query
157 * out: pstate identifier
158 */
159 u32 index; /* in: index of attribute to query
160 * out: index of next attribute, or 0 if no more
161 */
162 char name[32];
163 char unit[16];
164 u32 min;
165 u32 max;
166};
167
168struct nv_control_pstate_user {
169 s32 state; /* in: pstate identifier */
170};
171
172/* DMA FIFO channel classes
173 *
174 * 006b: NV03_CHANNEL_DMA
175 * 006e: NV10_CHANNEL_DMA
176 * 176e: NV17_CHANNEL_DMA
177 * 406e: NV40_CHANNEL_DMA
178 * 506e: NV50_CHANNEL_DMA
179 * 826e: NV84_CHANNEL_DMA
180 */
181#define NV03_CHANNEL_DMA_CLASS 0x0000006b
182#define NV10_CHANNEL_DMA_CLASS 0x0000006e
183#define NV17_CHANNEL_DMA_CLASS 0x0000176e
184#define NV40_CHANNEL_DMA_CLASS 0x0000406e
185#define NV50_CHANNEL_DMA_CLASS 0x0000506e
186#define NV84_CHANNEL_DMA_CLASS 0x0000826e
187
188struct nv03_channel_dma_class {
189 u32 pushbuf;
190 u32 pad0;
191 u64 offset;
192};
193
194/* Indirect FIFO channel classes
195 *
196 * 506f: NV50_CHANNEL_IND
197 * 826f: NV84_CHANNEL_IND
198 * 906f: NVC0_CHANNEL_IND
199 * a06f: NVE0_CHANNEL_IND
200 */
201
202#define NV50_CHANNEL_IND_CLASS 0x0000506f
203#define NV84_CHANNEL_IND_CLASS 0x0000826f
204#define NVC0_CHANNEL_IND_CLASS 0x0000906f
205#define NVE0_CHANNEL_IND_CLASS 0x0000a06f
206
207struct nv50_channel_ind_class {
208 u32 pushbuf;
209 u32 ilength;
210 u64 ioffset;
211};
212
213#define NVE0_CHANNEL_IND_ENGINE_GR 0x00000001
214#define NVE0_CHANNEL_IND_ENGINE_VP 0x00000002
215#define NVE0_CHANNEL_IND_ENGINE_PPP 0x00000004
216#define NVE0_CHANNEL_IND_ENGINE_BSP 0x00000008
217#define NVE0_CHANNEL_IND_ENGINE_CE0 0x00000010
218#define NVE0_CHANNEL_IND_ENGINE_CE1 0x00000020
219#define NVE0_CHANNEL_IND_ENGINE_ENC 0x00000040
220
221struct nve0_channel_ind_class {
222 u32 pushbuf;
223 u32 ilength;
224 u64 ioffset;
225 u32 engine;
226};
227
228/* 0046: NV04_DISP
229 */
230
231#define NV04_DISP_CLASS 0x00000046
232
233#define NV04_DISP_MTHD 0x00000000
234#define NV04_DISP_MTHD_HEAD 0x00000001
235
236#define NV04_DISP_SCANOUTPOS 0x00000000
237
238struct nv04_display_class {
239};
240
241struct nv04_display_scanoutpos {
242 s64 time[2];
243 u32 vblanks;
244 u32 vblanke;
245 u32 vtotal;
246 u32 vline;
247 u32 hblanks;
248 u32 hblanke;
249 u32 htotal;
250 u32 hline;
251};
252
253/* 5070: NV50_DISP
254 * 8270: NV84_DISP
255 * 8370: NVA0_DISP
256 * 8870: NV94_DISP
257 * 8570: NVA3_DISP
258 * 9070: NVD0_DISP
259 * 9170: NVE0_DISP
260 * 9270: NVF0_DISP
261 * 9470: GM107_DISP
262 */
263
264#define NV50_DISP_CLASS 0x00005070
265#define NV84_DISP_CLASS 0x00008270
266#define NVA0_DISP_CLASS 0x00008370
267#define NV94_DISP_CLASS 0x00008870
268#define NVA3_DISP_CLASS 0x00008570
269#define NVD0_DISP_CLASS 0x00009070
270#define NVE0_DISP_CLASS 0x00009170
271#define NVF0_DISP_CLASS 0x00009270
272#define GM107_DISP_CLASS 0x00009470
273
274#define NV50_DISP_MTHD 0x00000000
275#define NV50_DISP_MTHD_HEAD 0x00000003
276
277#define NV50_DISP_SCANOUTPOS 0x00000000
278
279#define NV50_DISP_SOR_MTHD 0x00010000
280#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
281#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
282#define NV50_DISP_SOR_MTHD_LINK 0x00000004
283#define NV50_DISP_SOR_MTHD_OR 0x00000003
284
285#define NV50_DISP_SOR_PWR 0x00010000
286#define NV50_DISP_SOR_PWR_STATE 0x00000001
287#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001
288#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000
289#define NVA3_DISP_SOR_HDA_ELD 0x00010100
290#define NV84_DISP_SOR_HDMI_PWR 0x00012000
291#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000
292#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000
293#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000
294#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000
295#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
296#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
297#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
298#define NV94_DISP_SOR_DP_PWR 0x00016000
299#define NV94_DISP_SOR_DP_PWR_STATE 0x00000001
300#define NV94_DISP_SOR_DP_PWR_STATE_OFF 0x00000000
301#define NV94_DISP_SOR_DP_PWR_STATE_ON 0x00000001
302
303#define NV50_DISP_DAC_MTHD 0x00020000
304#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
305#define NV50_DISP_DAC_MTHD_OR 0x00000003
306
307#define NV50_DISP_DAC_PWR 0x00020000
308#define NV50_DISP_DAC_PWR_HSYNC 0x00000001
309#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000
310#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001
311#define NV50_DISP_DAC_PWR_VSYNC 0x00000004
312#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000
313#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004
314#define NV50_DISP_DAC_PWR_DATA 0x00000010
315#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000
316#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010
317#define NV50_DISP_DAC_PWR_STATE 0x00000040
318#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
319#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
320#define NV50_DISP_DAC_LOAD 0x00020100
321#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
322
323#define NV50_DISP_PIOR_MTHD 0x00030000
324#define NV50_DISP_PIOR_MTHD_TYPE 0x0000f000
325#define NV50_DISP_PIOR_MTHD_OR 0x00000003
326
327#define NV50_DISP_PIOR_PWR 0x00030000
328#define NV50_DISP_PIOR_PWR_STATE 0x00000001
329#define NV50_DISP_PIOR_PWR_STATE_ON 0x00000001
330#define NV50_DISP_PIOR_PWR_STATE_OFF 0x00000000
331#define NV50_DISP_PIOR_TMDS_PWR 0x00032000
332#define NV50_DISP_PIOR_TMDS_PWR_STATE 0x00000001
333#define NV50_DISP_PIOR_TMDS_PWR_STATE_ON 0x00000001
334#define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF 0x00000000
335#define NV50_DISP_PIOR_DP_PWR 0x00036000
336#define NV50_DISP_PIOR_DP_PWR_STATE 0x00000001
337#define NV50_DISP_PIOR_DP_PWR_STATE_ON 0x00000001
338#define NV50_DISP_PIOR_DP_PWR_STATE_OFF 0x00000000
339
340struct nv50_display_class {
341};
342
343/* 507a: NV50_DISP_CURS
344 * 827a: NV84_DISP_CURS
345 * 837a: NVA0_DISP_CURS
346 * 887a: NV94_DISP_CURS
347 * 857a: NVA3_DISP_CURS
348 * 907a: NVD0_DISP_CURS
349 * 917a: NVE0_DISP_CURS
350 * 927a: NVF0_DISP_CURS
351 * 947a: GM107_DISP_CURS
352 */
353
354#define NV50_DISP_CURS_CLASS 0x0000507a
355#define NV84_DISP_CURS_CLASS 0x0000827a
356#define NVA0_DISP_CURS_CLASS 0x0000837a
357#define NV94_DISP_CURS_CLASS 0x0000887a
358#define NVA3_DISP_CURS_CLASS 0x0000857a
359#define NVD0_DISP_CURS_CLASS 0x0000907a
360#define NVE0_DISP_CURS_CLASS 0x0000917a
361#define NVF0_DISP_CURS_CLASS 0x0000927a
362#define GM107_DISP_CURS_CLASS 0x0000947a
363
364struct nv50_display_curs_class {
365 u32 head;
366};
367
368/* 507b: NV50_DISP_OIMM
369 * 827b: NV84_DISP_OIMM
370 * 837b: NVA0_DISP_OIMM
371 * 887b: NV94_DISP_OIMM
372 * 857b: NVA3_DISP_OIMM
373 * 907b: NVD0_DISP_OIMM
374 * 917b: NVE0_DISP_OIMM
375 * 927b: NVE0_DISP_OIMM
376 * 947b: GM107_DISP_OIMM
377 */
378
379#define NV50_DISP_OIMM_CLASS 0x0000507b
380#define NV84_DISP_OIMM_CLASS 0x0000827b
381#define NVA0_DISP_OIMM_CLASS 0x0000837b
382#define NV94_DISP_OIMM_CLASS 0x0000887b
383#define NVA3_DISP_OIMM_CLASS 0x0000857b
384#define NVD0_DISP_OIMM_CLASS 0x0000907b
385#define NVE0_DISP_OIMM_CLASS 0x0000917b
386#define NVF0_DISP_OIMM_CLASS 0x0000927b
387#define GM107_DISP_OIMM_CLASS 0x0000947b
388
389struct nv50_display_oimm_class {
390 u32 head;
391};
392
393/* 507c: NV50_DISP_SYNC
394 * 827c: NV84_DISP_SYNC
395 * 837c: NVA0_DISP_SYNC
396 * 887c: NV94_DISP_SYNC
397 * 857c: NVA3_DISP_SYNC
398 * 907c: NVD0_DISP_SYNC
399 * 917c: NVE0_DISP_SYNC
400 * 927c: NVF0_DISP_SYNC
401 * 947c: GM107_DISP_SYNC
402 */
403
404#define NV50_DISP_SYNC_CLASS 0x0000507c
405#define NV84_DISP_SYNC_CLASS 0x0000827c
406#define NVA0_DISP_SYNC_CLASS 0x0000837c
407#define NV94_DISP_SYNC_CLASS 0x0000887c
408#define NVA3_DISP_SYNC_CLASS 0x0000857c
409#define NVD0_DISP_SYNC_CLASS 0x0000907c
410#define NVE0_DISP_SYNC_CLASS 0x0000917c
411#define NVF0_DISP_SYNC_CLASS 0x0000927c
412#define GM107_DISP_SYNC_CLASS 0x0000947c
413
414struct nv50_display_sync_class {
415 u32 pushbuf;
416 u32 head;
417};
418
419/* 507d: NV50_DISP_MAST
420 * 827d: NV84_DISP_MAST
421 * 837d: NVA0_DISP_MAST
422 * 887d: NV94_DISP_MAST
423 * 857d: NVA3_DISP_MAST
424 * 907d: NVD0_DISP_MAST
425 * 917d: NVE0_DISP_MAST
426 * 927d: NVF0_DISP_MAST
427 * 947d: GM107_DISP_MAST
428 */
429
430#define NV50_DISP_MAST_CLASS 0x0000507d
431#define NV84_DISP_MAST_CLASS 0x0000827d
432#define NVA0_DISP_MAST_CLASS 0x0000837d
433#define NV94_DISP_MAST_CLASS 0x0000887d
434#define NVA3_DISP_MAST_CLASS 0x0000857d
435#define NVD0_DISP_MAST_CLASS 0x0000907d
436#define NVE0_DISP_MAST_CLASS 0x0000917d
437#define NVF0_DISP_MAST_CLASS 0x0000927d
438#define GM107_DISP_MAST_CLASS 0x0000947d
439
440struct nv50_display_mast_class {
441 u32 pushbuf;
442};
443
444/* 507e: NV50_DISP_OVLY
445 * 827e: NV84_DISP_OVLY
446 * 837e: NVA0_DISP_OVLY
447 * 887e: NV94_DISP_OVLY
448 * 857e: NVA3_DISP_OVLY
449 * 907e: NVD0_DISP_OVLY
450 * 917e: NVE0_DISP_OVLY
451 * 927e: NVF0_DISP_OVLY
452 * 947e: GM107_DISP_OVLY
453 */
454
455#define NV50_DISP_OVLY_CLASS 0x0000507e
456#define NV84_DISP_OVLY_CLASS 0x0000827e
457#define NVA0_DISP_OVLY_CLASS 0x0000837e
458#define NV94_DISP_OVLY_CLASS 0x0000887e
459#define NVA3_DISP_OVLY_CLASS 0x0000857e
460#define NVD0_DISP_OVLY_CLASS 0x0000907e
461#define NVE0_DISP_OVLY_CLASS 0x0000917e
462#define NVF0_DISP_OVLY_CLASS 0x0000927e
463#define GM107_DISP_OVLY_CLASS 0x0000947e
464
465struct nv50_display_ovly_class {
466 u32 pushbuf;
467 u32 head;
468};
469
470#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index c66eac513803..4fc6ab12382d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -10,6 +10,11 @@ struct nouveau_client {
10 char name[32]; 10 char name[32];
11 u32 debug; 11 u32 debug;
12 struct nouveau_vm *vm; 12 struct nouveau_vm *vm;
13 bool super;
14 void *data;
15
16 int (*ntfy)(const void *, u32, const void *, u32);
17 struct nvkm_client_notify *notify[8];
13}; 18};
14 19
15static inline struct nouveau_client * 20static inline struct nouveau_client *
@@ -43,4 +48,10 @@ int nouveau_client_init(struct nouveau_client *);
43int nouveau_client_fini(struct nouveau_client *, bool suspend); 48int nouveau_client_fini(struct nouveau_client *, bool suspend);
44const char *nouveau_client_name(void *obj); 49const char *nouveau_client_name(void *obj);
45 50
51int nvkm_client_notify_new(struct nouveau_client *, struct nvkm_event *,
52 void *data, u32 size);
53int nvkm_client_notify_del(struct nouveau_client *, int index);
54int nvkm_client_notify_get(struct nouveau_client *, int index);
55int nvkm_client_notify_put(struct nouveau_client *, int index);
56
46#endif 57#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index a8a9a9cf16cb..8743766454a5 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -4,6 +4,7 @@
4#include <core/object.h> 4#include <core/object.h>
5#include <core/subdev.h> 5#include <core/subdev.h>
6#include <core/engine.h> 6#include <core/engine.h>
7#include <core/event.h>
7 8
8enum nv_subdev_type { 9enum nv_subdev_type {
9 NVDEV_ENGINE_DEVICE, 10 NVDEV_ENGINE_DEVICE,
@@ -28,7 +29,7 @@ enum nv_subdev_type {
28 NVDEV_SUBDEV_BUS, 29 NVDEV_SUBDEV_BUS,
29 NVDEV_SUBDEV_TIMER, 30 NVDEV_SUBDEV_TIMER,
30 NVDEV_SUBDEV_FB, 31 NVDEV_SUBDEV_FB,
31 NVDEV_SUBDEV_LTCG, 32 NVDEV_SUBDEV_LTC,
32 NVDEV_SUBDEV_IBUS, 33 NVDEV_SUBDEV_IBUS,
33 NVDEV_SUBDEV_INSTMEM, 34 NVDEV_SUBDEV_INSTMEM,
34 NVDEV_SUBDEV_VM, 35 NVDEV_SUBDEV_VM,
@@ -69,6 +70,8 @@ struct nouveau_device {
69 struct platform_device *platformdev; 70 struct platform_device *platformdev;
70 u64 handle; 71 u64 handle;
71 72
73 struct nvkm_event event;
74
72 const char *cfgopt; 75 const char *cfgopt;
73 const char *dbgopt; 76 const char *dbgopt;
74 const char *name; 77 const char *name;
@@ -84,7 +87,6 @@ struct nouveau_device {
84 NV_40 = 0x40, 87 NV_40 = 0x40,
85 NV_50 = 0x50, 88 NV_50 = 0x50,
86 NV_C0 = 0xc0, 89 NV_C0 = 0xc0,
87 NV_D0 = 0xd0,
88 NV_E0 = 0xe0, 90 NV_E0 = 0xe0,
89 GM100 = 0x110, 91 GM100 = 0x110,
90 } card_type; 92 } card_type;
@@ -93,8 +95,14 @@ struct nouveau_device {
93 95
94 struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR]; 96 struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
95 struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; 97 struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
98
99 struct {
100 struct notifier_block nb;
101 } acpi;
96}; 102};
97 103
104int nouveau_device_list(u64 *name, int size);
105
98static inline struct nouveau_device * 106static inline struct nouveau_device *
99nv_device(void *obj) 107nv_device(void *obj)
100{ 108{
@@ -162,12 +170,6 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
162resource_size_t 170resource_size_t
163nv_device_resource_len(struct nouveau_device *device, unsigned int bar); 171nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
164 172
165dma_addr_t
166nv_device_map_page(struct nouveau_device *device, struct page *page);
167
168void
169nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr);
170
171int 173int
172nv_device_get_irq(struct nouveau_device *device, bool stall); 174nv_device_get_irq(struct nouveau_device *device, bool stall);
173 175
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index ba3f1a76a815..51e55d03330a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -1,47 +1,34 @@
1#ifndef __NVKM_EVENT_H__ 1#ifndef __NVKM_EVENT_H__
2#define __NVKM_EVENT_H__ 2#define __NVKM_EVENT_H__
3 3
4/* return codes from event handlers */ 4#include <core/notify.h>
5#define NVKM_EVENT_DROP 0
6#define NVKM_EVENT_KEEP 1
7 5
8/* nouveau_eventh.flags bit #s */ 6struct nvkm_event_func {
9#define NVKM_EVENT_ENABLE 0 7 int (*ctor)(void *data, u32 size, struct nvkm_notify *);
10 8 void (*send)(void *data, u32 size, struct nvkm_notify *);
11struct nouveau_eventh { 9 void (*init)(struct nvkm_event *, int type, int index);
12 struct nouveau_event *event; 10 void (*fini)(struct nvkm_event *, int type, int index);
13 struct list_head head;
14 unsigned long flags;
15 u32 types;
16 int index;
17 int (*func)(void *, u32, int);
18 void *priv;
19}; 11};
20 12
21struct nouveau_event { 13struct nvkm_event {
22 void *priv; 14 const struct nvkm_event_func *func;
23 int (*check)(struct nouveau_event *, u32 type, int index);
24 void (*enable)(struct nouveau_event *, int type, int index);
25 void (*disable)(struct nouveau_event *, int type, int index);
26 15
27 int types_nr; 16 int types_nr;
28 int index_nr; 17 int index_nr;
29 18
30 spinlock_t list_lock;
31 struct list_head *list;
32 spinlock_t refs_lock; 19 spinlock_t refs_lock;
33 int refs[]; 20 spinlock_t list_lock;
21 struct list_head list;
22 int *refs;
34}; 23};
35 24
36int nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **); 25int nvkm_event_init(const struct nvkm_event_func *func,
37void nouveau_event_destroy(struct nouveau_event **); 26 int types_nr, int index_nr,
38void nouveau_event_trigger(struct nouveau_event *, u32 types, int index); 27 struct nvkm_event *);
39 28void nvkm_event_fini(struct nvkm_event *);
40int nouveau_event_new(struct nouveau_event *, u32 types, int index, 29void nvkm_event_get(struct nvkm_event *, u32 types, int index);
41 int (*func)(void *, u32, int), void *, 30void nvkm_event_put(struct nvkm_event *, u32 types, int index);
42 struct nouveau_eventh **); 31void nvkm_event_send(struct nvkm_event *, u32 types, int index,
43void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **); 32 void *data, u32 size);
44void nouveau_event_get(struct nouveau_eventh *);
45void nouveau_event_put(struct nouveau_eventh *);
46 33
47#endif 34#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h
index 363674cdf8ab..ceb67d770875 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/handle.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h
@@ -10,6 +10,9 @@ struct nouveau_handle {
10 u32 name; 10 u32 name;
11 u32 priv; 11 u32 priv;
12 12
13 u8 route;
14 u64 token;
15
13 struct nouveau_handle *parent; 16 struct nouveau_handle *parent;
14 struct nouveau_object *object; 17 struct nouveau_object *object;
15}; 18};
@@ -20,6 +23,11 @@ void nouveau_handle_destroy(struct nouveau_handle *);
20int nouveau_handle_init(struct nouveau_handle *); 23int nouveau_handle_init(struct nouveau_handle *);
21int nouveau_handle_fini(struct nouveau_handle *, bool suspend); 24int nouveau_handle_fini(struct nouveau_handle *, bool suspend);
22 25
26int nouveau_handle_new(struct nouveau_object *, u32 parent, u32 handle,
27 u16 oclass, void *data, u32 size,
28 struct nouveau_object **);
29int nouveau_handle_del(struct nouveau_object *, u32 parent, u32 handle);
30
23struct nouveau_object * 31struct nouveau_object *
24nouveau_handle_ref(struct nouveau_object *, u32 name); 32nouveau_handle_ref(struct nouveau_object *, u32 name);
25 33
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ioctl.h b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h
new file mode 100644
index 000000000000..ac7935c2474e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h
@@ -0,0 +1,6 @@
1#ifndef __NVKM_IOCTL_H__
2#define __NVKM_IOCTL_H__
3
4int nvkm_ioctl(struct nouveau_client *, bool, void *, u32, void **);
5
6#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/notify.h b/drivers/gpu/drm/nouveau/core/include/core/notify.h
new file mode 100644
index 000000000000..1262d8f020f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/notify.h
@@ -0,0 +1,36 @@
1#ifndef __NVKM_NOTIFY_H__
2#define __NVKM_NOTIFY_H__
3
4struct nvkm_notify {
5 struct nvkm_event *event;
6 struct list_head head;
7#define NVKM_NOTIFY_USER 0
8#define NVKM_NOTIFY_WORK 1
9 unsigned long flags;
10 int block;
11#define NVKM_NOTIFY_DROP 0
12#define NVKM_NOTIFY_KEEP 1
13 int (*func)(struct nvkm_notify *);
14
15 /* set by nvkm_event ctor */
16 u32 types;
17 int index;
18 u32 size;
19
20 struct work_struct work;
21 /* this is const for a *very* good reason - the data might be on the
22 * stack from an irq handler. if you're not core/notify.c then you
23 * should probably think twice before casting it away...
24 */
25 const void *data;
26};
27
28int nvkm_notify_init(struct nvkm_event *, int (*func)(struct nvkm_notify *),
29 bool work, void *data, u32 size, u32 reply,
30 struct nvkm_notify *);
31void nvkm_notify_fini(struct nvkm_notify *);
32void nvkm_notify_get(struct nvkm_notify *);
33void nvkm_notify_put(struct nvkm_notify *);
34void nvkm_notify_send(struct nvkm_notify *, void *data, u32 size);
35
36#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 62e68baef087..d7039482d6fd 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -48,6 +48,10 @@ void nouveau_object_destroy(struct nouveau_object *);
48int nouveau_object_init(struct nouveau_object *); 48int nouveau_object_init(struct nouveau_object *);
49int nouveau_object_fini(struct nouveau_object *, bool suspend); 49int nouveau_object_fini(struct nouveau_object *, bool suspend);
50 50
51int _nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
52 struct nouveau_oclass *, void *, u32,
53 struct nouveau_object **);
54
51extern struct nouveau_ofuncs nouveau_object_ofuncs; 55extern struct nouveau_ofuncs nouveau_object_ofuncs;
52 56
53/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in 57/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
@@ -78,6 +82,7 @@ struct nouveau_omthds {
78 int (*call)(struct nouveau_object *, u32, void *, u32); 82 int (*call)(struct nouveau_object *, u32, void *, u32);
79}; 83};
80 84
85struct nvkm_event;
81struct nouveau_ofuncs { 86struct nouveau_ofuncs {
82 int (*ctor)(struct nouveau_object *, struct nouveau_object *, 87 int (*ctor)(struct nouveau_object *, struct nouveau_object *,
83 struct nouveau_oclass *, void *data, u32 size, 88 struct nouveau_oclass *, void *data, u32 size,
@@ -85,6 +90,9 @@ struct nouveau_ofuncs {
85 void (*dtor)(struct nouveau_object *); 90 void (*dtor)(struct nouveau_object *);
86 int (*init)(struct nouveau_object *); 91 int (*init)(struct nouveau_object *);
87 int (*fini)(struct nouveau_object *, bool suspend); 92 int (*fini)(struct nouveau_object *, bool suspend);
93 int (*mthd)(struct nouveau_object *, u32, void *, u32);
94 int (*ntfy)(struct nouveau_object *, u32, struct nvkm_event **);
95 int (* map)(struct nouveau_object *, u64 *, u32 *);
88 u8 (*rd08)(struct nouveau_object *, u64 offset); 96 u8 (*rd08)(struct nouveau_object *, u64 offset);
89 u16 (*rd16)(struct nouveau_object *, u64 offset); 97 u16 (*rd16)(struct nouveau_object *, u64 offset);
90 u32 (*rd32)(struct nouveau_object *, u64 offset); 98 u32 (*rd32)(struct nouveau_object *, u64 offset);
@@ -106,10 +114,6 @@ void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **);
106int nouveau_object_inc(struct nouveau_object *); 114int nouveau_object_inc(struct nouveau_object *);
107int nouveau_object_dec(struct nouveau_object *, bool suspend); 115int nouveau_object_dec(struct nouveau_object *, bool suspend);
108 116
109int nouveau_object_new(struct nouveau_object *, u32 parent, u32 handle,
110 u16 oclass, void *data, u32 size,
111 struct nouveau_object **);
112int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
113void nouveau_object_debug(void); 117void nouveau_object_debug(void);
114 118
115static inline int 119static inline int
@@ -199,4 +203,21 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
199 return 0; 203 return 0;
200} 204}
201 205
206#include <core/handle.h>
207
208static inline int
209nouveau_object_new(struct nouveau_object *client, u32 parent, u32 handle,
210 u16 oclass, void *data, u32 size,
211 struct nouveau_object **pobject)
212{
213 return nouveau_handle_new(client, parent, handle, oclass,
214 data, size, pobject);
215}
216
217static inline int
218nouveau_object_del(struct nouveau_object *client, u32 parent, u32 handle)
219{
220 return nouveau_handle_del(client, parent, handle);
221}
222
202#endif 223#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 9f5ea900ff00..12da418ec70a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -57,5 +57,6 @@ void _nouveau_parent_dtor(struct nouveau_object *);
57int nouveau_parent_sclass(struct nouveau_object *, u16 handle, 57int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
58 struct nouveau_object **pengine, 58 struct nouveau_object **pengine,
59 struct nouveau_oclass **poclass); 59 struct nouveau_oclass **poclass);
60int nouveau_parent_lclass(struct nouveau_object *, u32 *, int);
60 61
61#endif 62#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index 0f9a37bd32b0..451b6ed20b7e 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -21,6 +21,7 @@ nv_printk_(struct nouveau_object *, int, const char *, ...);
21#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a) 21#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
22#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) 22#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
23#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) 23#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
24#define nv_ioctl(o,f,a...) nv_trace(nouveau_client(o), "ioctl: "f, ##a)
24 25
25#define nv_assert(f,a...) do { \ 26#define nv_assert(f,a...) do { \
26 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ 27 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index fde842896806..7a64f347b385 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -6,20 +6,13 @@
6#include <core/device.h> 6#include <core/device.h>
7#include <core/event.h> 7#include <core/event.h>
8 8
9enum nvkm_hpd_event {
10 NVKM_HPD_PLUG = 1,
11 NVKM_HPD_UNPLUG = 2,
12 NVKM_HPD_IRQ = 4,
13 NVKM_HPD = (NVKM_HPD_PLUG | NVKM_HPD_UNPLUG | NVKM_HPD_IRQ)
14};
15
16struct nouveau_disp { 9struct nouveau_disp {
17 struct nouveau_engine base; 10 struct nouveau_engine base;
18 11
19 struct list_head outp; 12 struct list_head outp;
20 struct nouveau_event *hpd;
21 13
22 struct nouveau_event *vblank; 14 struct nvkm_event hpd;
15 struct nvkm_event vblank;
23}; 16};
24 17
25static inline struct nouveau_disp * 18static inline struct nouveau_disp *
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index b28914ed1752..1b283a7b78e6 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -12,37 +12,20 @@ struct nouveau_dmaobj {
12 u32 access; 12 u32 access;
13 u64 start; 13 u64 start;
14 u64 limit; 14 u64 limit;
15 u32 conf0;
16}; 15};
17 16
18struct nouveau_dmaeng { 17struct nouveau_dmaeng {
19 struct nouveau_engine base; 18 struct nouveau_engine base;
20 19
21 /* creates a "physical" dma object from a struct nouveau_dmaobj */ 20 /* creates a "physical" dma object from a struct nouveau_dmaobj */
22 int (*bind)(struct nouveau_dmaeng *dmaeng, 21 int (*bind)(struct nouveau_dmaobj *dmaobj,
23 struct nouveau_object *parent, 22 struct nouveau_object *parent,
24 struct nouveau_dmaobj *dmaobj,
25 struct nouveau_gpuobj **); 23 struct nouveau_gpuobj **);
26}; 24};
27 25
28#define nouveau_dmaeng_create(p,e,c,d) \ 26extern struct nouveau_oclass *nv04_dmaeng_oclass;
29 nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d)) 27extern struct nouveau_oclass *nv50_dmaeng_oclass;
30#define nouveau_dmaeng_destroy(p) \ 28extern struct nouveau_oclass *nvc0_dmaeng_oclass;
31 nouveau_engine_destroy(&(p)->base) 29extern struct nouveau_oclass *nvd0_dmaeng_oclass;
32#define nouveau_dmaeng_init(p) \
33 nouveau_engine_init(&(p)->base)
34#define nouveau_dmaeng_fini(p,s) \
35 nouveau_engine_fini(&(p)->base, (s))
36
37#define _nouveau_dmaeng_dtor _nouveau_engine_dtor
38#define _nouveau_dmaeng_init _nouveau_engine_init
39#define _nouveau_dmaeng_fini _nouveau_engine_fini
40
41extern struct nouveau_oclass nv04_dmaeng_oclass;
42extern struct nouveau_oclass nv50_dmaeng_oclass;
43extern struct nouveau_oclass nvc0_dmaeng_oclass;
44extern struct nouveau_oclass nvd0_dmaeng_oclass;
45
46extern struct nouveau_oclass nouveau_dmaobj_sclass[];
47 30
48#endif 31#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index b639eb2c74ff..e5e4d930b2c2 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -4,12 +4,14 @@
4#include <core/namedb.h> 4#include <core/namedb.h>
5#include <core/gpuobj.h> 5#include <core/gpuobj.h>
6#include <core/engine.h> 6#include <core/engine.h>
7#include <core/event.h>
7 8
8struct nouveau_fifo_chan { 9struct nouveau_fifo_chan {
9 struct nouveau_namedb base; 10 struct nouveau_namedb base;
10 struct nouveau_dmaobj *pushdma; 11 struct nouveau_dmaobj *pushdma;
11 struct nouveau_gpuobj *pushgpu; 12 struct nouveau_gpuobj *pushgpu;
12 void __iomem *user; 13 void __iomem *user;
14 u64 addr;
13 u32 size; 15 u32 size;
14 u16 chid; 16 u16 chid;
15 atomic_t refcnt; /* NV04_NVSW_SET_REF */ 17 atomic_t refcnt; /* NV04_NVSW_SET_REF */
@@ -40,8 +42,10 @@ void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
40#define _nouveau_fifo_channel_fini _nouveau_namedb_fini 42#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
41 43
42void _nouveau_fifo_channel_dtor(struct nouveau_object *); 44void _nouveau_fifo_channel_dtor(struct nouveau_object *);
45int _nouveau_fifo_channel_map(struct nouveau_object *, u64 *, u32 *);
43u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64); 46u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
44void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32); 47void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
48int _nouveau_fifo_channel_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
45 49
46struct nouveau_fifo_base { 50struct nouveau_fifo_base {
47 struct nouveau_gpuobj base; 51 struct nouveau_gpuobj base;
@@ -65,8 +69,8 @@ struct nouveau_fifo_base {
65struct nouveau_fifo { 69struct nouveau_fifo {
66 struct nouveau_engine base; 70 struct nouveau_engine base;
67 71
68 struct nouveau_event *cevent; /* channel creation event */ 72 struct nvkm_event cevent; /* channel creation event */
69 struct nouveau_event *uevent; /* async user trigger */ 73 struct nvkm_event uevent; /* async user trigger */
70 74
71 struct nouveau_object **channel; 75 struct nouveau_object **channel;
72 spinlock_t lock; 76 spinlock_t lock;
@@ -112,6 +116,9 @@ extern struct nouveau_oclass *nve0_fifo_oclass;
112extern struct nouveau_oclass *gk20a_fifo_oclass; 116extern struct nouveau_oclass *gk20a_fifo_oclass;
113extern struct nouveau_oclass *nv108_fifo_oclass; 117extern struct nouveau_oclass *nv108_fifo_oclass;
114 118
119int nouveau_fifo_uevent_ctor(void *, u32, struct nvkm_notify *);
120void nouveau_fifo_uevent(struct nouveau_fifo *);
121
115void nv04_fifo_intr(struct nouveau_subdev *); 122void nv04_fifo_intr(struct nouveau_subdev *);
116int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *); 123int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
117 124
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 8c1d4772da0c..d5055570d01b 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -70,6 +70,7 @@ extern struct nouveau_oclass *nvd9_graph_oclass;
70extern struct nouveau_oclass *nve4_graph_oclass; 70extern struct nouveau_oclass *nve4_graph_oclass;
71extern struct nouveau_oclass *gk20a_graph_oclass; 71extern struct nouveau_oclass *gk20a_graph_oclass;
72extern struct nouveau_oclass *nvf0_graph_oclass; 72extern struct nouveau_oclass *nvf0_graph_oclass;
73extern struct nouveau_oclass *gk110b_graph_oclass;
73extern struct nouveau_oclass *nv108_graph_oclass; 74extern struct nouveau_oclass *nv108_graph_oclass;
74extern struct nouveau_oclass *gm107_graph_oclass; 75extern struct nouveau_oclass *gm107_graph_oclass;
75 76
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
index 49b0024910fe..88cc812baaa3 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
@@ -4,7 +4,6 @@
4#include <core/device.h> 4#include <core/device.h>
5#include <core/engine.h> 5#include <core/engine.h>
6#include <core/engctx.h> 6#include <core/engctx.h>
7#include <core/class.h>
8 7
9struct nouveau_perfdom; 8struct nouveau_perfdom;
10struct nouveau_perfctr; 9struct nouveau_perfctr;
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/class.h b/drivers/gpu/drm/nouveau/core/include/nvif/class.h
new file mode 120000
index 000000000000..f1ac4859edd4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/class.h
@@ -0,0 +1 @@
../../../nvif/class.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/event.h b/drivers/gpu/drm/nouveau/core/include/nvif/event.h
new file mode 120000
index 000000000000..1b798538a725
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/event.h
@@ -0,0 +1 @@
../../../nvif/event.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h
new file mode 120000
index 000000000000..8569c86907c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h
@@ -0,0 +1 @@
../../../nvif/ioctl.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h
new file mode 120000
index 000000000000..69d99292bca4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h
@@ -0,0 +1 @@
../../../nvif/unpack.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
index 9faa98e67ad8..be037fac534c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -20,6 +20,9 @@ struct nouveau_bar {
20 u32 flags, struct nouveau_vma *); 20 u32 flags, struct nouveau_vma *);
21 void (*unmap)(struct nouveau_bar *, struct nouveau_vma *); 21 void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
22 void (*flush)(struct nouveau_bar *); 22 void (*flush)(struct nouveau_bar *);
23
24 /* whether the BAR supports to be ioremapped WC or should be uncached */
25 bool iomap_uncached;
23}; 26};
24 27
25static inline struct nouveau_bar * 28static inline struct nouveau_bar *
@@ -30,5 +33,6 @@ nouveau_bar(void *obj)
30 33
31extern struct nouveau_oclass nv50_bar_oclass; 34extern struct nouveau_oclass nv50_bar_oclass;
32extern struct nouveau_oclass nvc0_bar_oclass; 35extern struct nouveau_oclass nvc0_bar_oclass;
36extern struct nouveau_oclass gk20a_bar_oclass;
33 37
34#endif 38#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index c01e29c9f89a..a5ca00dd2f61 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -71,8 +71,15 @@ struct nouveau_clock {
71 struct list_head states; 71 struct list_head states;
72 int state_nr; 72 int state_nr;
73 73
74 struct work_struct work;
75 wait_queue_head_t wait;
76 atomic_t waiting;
77
78 struct nvkm_notify pwrsrc_ntfy;
79 int pwrsrc;
74 int pstate; /* current */ 80 int pstate; /* current */
75 int ustate; /* user-requested (-1 disabled, -2 perfmon) */ 81 int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
82 int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
76 int astate; /* perfmon adjustment (base) */ 83 int astate; /* perfmon adjustment (base) */
77 int tstate; /* thermal adjustment (max-) */ 84 int tstate; /* thermal adjustment (max-) */
78 int dstate; /* display adjustment (min+) */ 85 int dstate; /* display adjustment (min+) */
@@ -108,8 +115,9 @@ struct nouveau_clocks {
108 int mdiv; 115 int mdiv;
109}; 116};
110 117
111#define nouveau_clock_create(p,e,o,i,r,d) \ 118#define nouveau_clock_create(p,e,o,i,r,s,n,d) \
112 nouveau_clock_create_((p), (e), (o), (i), (r), sizeof(**d), (void **)d) 119 nouveau_clock_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d), \
120 (void **)d)
113#define nouveau_clock_destroy(p) ({ \ 121#define nouveau_clock_destroy(p) ({ \
114 struct nouveau_clock *clk = (p); \ 122 struct nouveau_clock *clk = (p); \
115 _nouveau_clock_dtor(nv_object(clk)); \ 123 _nouveau_clock_dtor(nv_object(clk)); \
@@ -118,15 +126,18 @@ struct nouveau_clocks {
118 struct nouveau_clock *clk = (p); \ 126 struct nouveau_clock *clk = (p); \
119 _nouveau_clock_init(nv_object(clk)); \ 127 _nouveau_clock_init(nv_object(clk)); \
120}) 128})
121#define nouveau_clock_fini(p,s) \ 129#define nouveau_clock_fini(p,s) ({ \
122 nouveau_subdev_fini(&(p)->base, (s)) 130 struct nouveau_clock *clk = (p); \
131 _nouveau_clock_fini(nv_object(clk), (s)); \
132})
123 133
124int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *, 134int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
125 struct nouveau_oclass *, 135 struct nouveau_oclass *,
126 struct nouveau_clocks *, bool, int, void **); 136 struct nouveau_clocks *, struct nouveau_pstate *,
137 int, bool, int, void **);
127void _nouveau_clock_dtor(struct nouveau_object *); 138void _nouveau_clock_dtor(struct nouveau_object *);
128int _nouveau_clock_init(struct nouveau_object *); 139int _nouveau_clock_init(struct nouveau_object *);
129#define _nouveau_clock_fini _nouveau_subdev_fini 140int _nouveau_clock_fini(struct nouveau_object *, bool);
130 141
131extern struct nouveau_oclass nv04_clock_oclass; 142extern struct nouveau_oclass nv04_clock_oclass;
132extern struct nouveau_oclass nv40_clock_oclass; 143extern struct nouveau_oclass nv40_clock_oclass;
@@ -136,6 +147,7 @@ extern struct nouveau_oclass *nvaa_clock_oclass;
136extern struct nouveau_oclass nva3_clock_oclass; 147extern struct nouveau_oclass nva3_clock_oclass;
137extern struct nouveau_oclass nvc0_clock_oclass; 148extern struct nouveau_oclass nvc0_clock_oclass;
138extern struct nouveau_oclass nve0_clock_oclass; 149extern struct nouveau_oclass nve0_clock_oclass;
150extern struct nouveau_oclass gk20a_clock_oclass;
139 151
140int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq); 152int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
141int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, 153int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
@@ -145,7 +157,7 @@ int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
145int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, 157int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
146 int clk, struct nouveau_pll_vals *); 158 int clk, struct nouveau_pll_vals *);
147 159
148int nouveau_clock_ustate(struct nouveau_clock *, int req); 160int nouveau_clock_ustate(struct nouveau_clock *, int req, int pwr);
149int nouveau_clock_astate(struct nouveau_clock *, int req, int rel); 161int nouveau_clock_astate(struct nouveau_clock *, int req, int rel);
150int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel); 162int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel);
151int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel); 163int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index 612d82ab683d..b73733d21cc7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -8,16 +8,22 @@
8#include <subdev/bios.h> 8#include <subdev/bios.h>
9#include <subdev/bios/gpio.h> 9#include <subdev/bios/gpio.h>
10 10
11enum nvkm_gpio_event { 11struct nvkm_gpio_ntfy_req {
12 NVKM_GPIO_HI = 1, 12#define NVKM_GPIO_HI 0x01
13 NVKM_GPIO_LO = 2, 13#define NVKM_GPIO_LO 0x02
14 NVKM_GPIO_TOGGLED = (NVKM_GPIO_HI | NVKM_GPIO_LO), 14#define NVKM_GPIO_TOGGLED 0x03
15 u8 mask;
16 u8 line;
17};
18
19struct nvkm_gpio_ntfy_rep {
20 u8 mask;
15}; 21};
16 22
17struct nouveau_gpio { 23struct nouveau_gpio {
18 struct nouveau_subdev base; 24 struct nouveau_subdev base;
19 25
20 struct nouveau_event *events; 26 struct nvkm_event event;
21 27
22 void (*reset)(struct nouveau_gpio *, u8 func); 28 void (*reset)(struct nouveau_gpio *, u8 func);
23 int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line, 29 int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 825f7bb46b67..1b937c2c25ae 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -14,15 +14,18 @@
14#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8) 14#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
15#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8) 15#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
16 16
17enum nvkm_i2c_event { 17struct nvkm_i2c_ntfy_req {
18 NVKM_I2C_PLUG = 1, 18#define NVKM_I2C_PLUG 0x01
19 NVKM_I2C_UNPLUG = 2, 19#define NVKM_I2C_UNPLUG 0x02
20 NVKM_I2C_IRQ = 4, 20#define NVKM_I2C_IRQ 0x04
21 NVKM_I2C_DONE = 8, 21#define NVKM_I2C_DONE 0x08
22 NVKM_I2C_ANY = (NVKM_I2C_PLUG | 22#define NVKM_I2C_ANY 0x0f
23 NVKM_I2C_UNPLUG | 23 u8 mask;
24 NVKM_I2C_IRQ | 24 u8 port;
25 NVKM_I2C_DONE), 25};
26
27struct nvkm_i2c_ntfy_rep {
28 u8 mask;
26}; 29};
27 30
28struct nouveau_i2c_port { 31struct nouveau_i2c_port {
@@ -56,7 +59,7 @@ struct nouveau_i2c_board_info {
56 59
57struct nouveau_i2c { 60struct nouveau_i2c {
58 struct nouveau_subdev base; 61 struct nouveau_subdev base;
59 struct nouveau_event *ntfy; 62 struct nvkm_event event;
60 63
61 struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index); 64 struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
62 struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type); 65 struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h
new file mode 100644
index 000000000000..b909a7363f6b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h
@@ -0,0 +1,35 @@
1#ifndef __NOUVEAU_LTC_H__
2#define __NOUVEAU_LTC_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7#define NOUVEAU_LTC_MAX_ZBC_CNT 16
8
9struct nouveau_mm_node;
10
11struct nouveau_ltc {
12 struct nouveau_subdev base;
13
14 int (*tags_alloc)(struct nouveau_ltc *, u32 count,
15 struct nouveau_mm_node **);
16 void (*tags_free)(struct nouveau_ltc *, struct nouveau_mm_node **);
17 void (*tags_clear)(struct nouveau_ltc *, u32 first, u32 count);
18
19 int zbc_min;
20 int zbc_max;
21 int (*zbc_color_get)(struct nouveau_ltc *, int index, const u32[4]);
22 int (*zbc_depth_get)(struct nouveau_ltc *, int index, const u32);
23};
24
25static inline struct nouveau_ltc *
26nouveau_ltc(void *obj)
27{
28 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTC];
29}
30
31extern struct nouveau_oclass *gf100_ltc_oclass;
32extern struct nouveau_oclass *gk104_ltc_oclass;
33extern struct nouveau_oclass *gm107_ltc_oclass;
34
35#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
deleted file mode 100644
index c9c1950b7743..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef __NOUVEAU_LTCG_H__
2#define __NOUVEAU_LTCG_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_mm_node;
8
9struct nouveau_ltcg {
10 struct nouveau_subdev base;
11
12 int (*tags_alloc)(struct nouveau_ltcg *, u32 count,
13 struct nouveau_mm_node **);
14 void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **);
15 void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count);
16};
17
18static inline struct nouveau_ltcg *
19nouveau_ltcg(void *obj)
20{
21 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTCG];
22}
23
24#define nouveau_ltcg_create(p,e,o,d) \
25 nouveau_subdev_create_((p), (e), (o), 0, "PLTCG", "level2", \
26 sizeof(**d), (void **)d)
27#define nouveau_ltcg_destroy(p) \
28 nouveau_subdev_destroy(&(p)->base)
29#define nouveau_ltcg_init(p) \
30 nouveau_subdev_init(&(p)->base)
31#define nouveau_ltcg_fini(p,s) \
32 nouveau_subdev_fini(&(p)->base, (s))
33
34#define _nouveau_ltcg_dtor _nouveau_subdev_dtor
35#define _nouveau_ltcg_init _nouveau_subdev_init
36#define _nouveau_ltcg_fini _nouveau_subdev_fini
37
38extern struct nouveau_oclass *gf100_ltcg_oclass;
39extern struct nouveau_oclass *gm107_ltcg_oclass;
40
41#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index 72b176831be6..568e4dfc5e9e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -4,15 +4,11 @@
4#include <core/subdev.h> 4#include <core/subdev.h>
5#include <core/device.h> 5#include <core/device.h>
6 6
7struct nouveau_mc_intr {
8 u32 stat;
9 u32 unit;
10};
11
12struct nouveau_mc { 7struct nouveau_mc {
13 struct nouveau_subdev base; 8 struct nouveau_subdev base;
14 bool use_msi; 9 bool use_msi;
15 unsigned int irq; 10 unsigned int irq;
11 void (*unk260)(struct nouveau_mc *, u32);
16}; 12};
17 13
18static inline struct nouveau_mc * 14static inline struct nouveau_mc *
@@ -21,30 +17,6 @@ nouveau_mc(void *obj)
21 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; 17 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
22} 18}
23 19
24#define nouveau_mc_create(p,e,o,d) \
25 nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
26#define nouveau_mc_destroy(p) ({ \
27 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
28})
29#define nouveau_mc_init(p) ({ \
30 struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \
31})
32#define nouveau_mc_fini(p,s) ({ \
33 struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \
34})
35
36int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
37 struct nouveau_oclass *, int, void **);
38void _nouveau_mc_dtor(struct nouveau_object *);
39int _nouveau_mc_init(struct nouveau_object *);
40int _nouveau_mc_fini(struct nouveau_object *, bool);
41
42struct nouveau_mc_oclass {
43 struct nouveau_oclass base;
44 const struct nouveau_mc_intr *intr;
45 void (*msi_rearm)(struct nouveau_mc *);
46};
47
48extern struct nouveau_oclass *nv04_mc_oclass; 20extern struct nouveau_oclass *nv04_mc_oclass;
49extern struct nouveau_oclass *nv40_mc_oclass; 21extern struct nouveau_oclass *nv40_mc_oclass;
50extern struct nouveau_oclass *nv44_mc_oclass; 22extern struct nouveau_oclass *nv44_mc_oclass;
@@ -54,5 +26,6 @@ extern struct nouveau_oclass *nv94_mc_oclass;
54extern struct nouveau_oclass *nv98_mc_oclass; 26extern struct nouveau_oclass *nv98_mc_oclass;
55extern struct nouveau_oclass *nvc0_mc_oclass; 27extern struct nouveau_oclass *nvc0_mc_oclass;
56extern struct nouveau_oclass *nvc3_mc_oclass; 28extern struct nouveau_oclass *nvc3_mc_oclass;
29extern struct nouveau_oclass *gk20a_mc_oclass;
57 30
58#endif 31#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
index c5c92cbed33f..f73feec151db 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -8,18 +8,6 @@ struct nouveau_pwr {
8 struct nouveau_subdev base; 8 struct nouveau_subdev base;
9 9
10 struct { 10 struct {
11 u32 limit;
12 u32 *data;
13 u32 size;
14 } code;
15
16 struct {
17 u32 limit;
18 u32 *data;
19 u32 size;
20 } data;
21
22 struct {
23 u32 base; 11 u32 base;
24 u32 size; 12 u32 size;
25 } send; 13 } send;
@@ -35,7 +23,8 @@ struct nouveau_pwr {
35 u32 data[2]; 23 u32 data[2];
36 } recv; 24 } recv;
37 25
38 int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32); 26 int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
27 void (*pgob)(struct nouveau_pwr *, bool);
39}; 28};
40 29
41static inline struct nouveau_pwr * 30static inline struct nouveau_pwr *
@@ -44,29 +33,11 @@ nouveau_pwr(void *obj)
44 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR]; 33 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR];
45} 34}
46 35
47#define nouveau_pwr_create(p, e, o, d) \ 36extern struct nouveau_oclass *nva3_pwr_oclass;
48 nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d) 37extern struct nouveau_oclass *nvc0_pwr_oclass;
49#define nouveau_pwr_destroy(p) \ 38extern struct nouveau_oclass *nvd0_pwr_oclass;
50 nouveau_subdev_destroy(&(p)->base) 39extern struct nouveau_oclass *gk104_pwr_oclass;
51#define nouveau_pwr_init(p) ({ \ 40extern struct nouveau_oclass *nv108_pwr_oclass;
52 struct nouveau_pwr *ppwr = (p); \
53 _nouveau_pwr_init(nv_object(ppwr)); \
54})
55#define nouveau_pwr_fini(p,s) ({ \
56 struct nouveau_pwr *ppwr = (p); \
57 _nouveau_pwr_fini(nv_object(ppwr), (s)); \
58})
59
60int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
61 struct nouveau_oclass *, int, void **);
62#define _nouveau_pwr_dtor _nouveau_subdev_dtor
63int _nouveau_pwr_init(struct nouveau_object *);
64int _nouveau_pwr_fini(struct nouveau_object *, bool);
65
66extern struct nouveau_oclass nva3_pwr_oclass;
67extern struct nouveau_oclass nvc0_pwr_oclass;
68extern struct nouveau_oclass nvd0_pwr_oclass;
69extern struct nouveau_oclass nv108_pwr_oclass;
70 41
71/* interface to MEMX process running on PPWR */ 42/* interface to MEMX process running on PPWR */
72struct nouveau_memx; 43struct nouveau_memx;
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index d0ced94ca54c..ccfa21d72ddc 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -21,6 +21,8 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/log2.h> 22#include <linux/log2.h>
23#include <linux/pm_runtime.h> 23#include <linux/pm_runtime.h>
24#include <linux/power_supply.h>
25#include <linux/clk.h>
24 26
25#include <asm/unaligned.h> 27#include <asm/unaligned.h>
26 28
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c
new file mode 100644
index 000000000000..bf877af9d3bd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c
@@ -0,0 +1,54 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <subdev/bar.h>
24
25#include "priv.h"
26
27int
28gk20a_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
29 struct nouveau_oclass *oclass, void *data, u32 size,
30 struct nouveau_object **pobject)
31{
32 struct nouveau_bar *bar;
33 int ret;
34
35 ret = nvc0_bar_ctor(parent, engine, oclass, data, size, pobject);
36 if (ret)
37 return ret;
38
39 bar = (struct nouveau_bar *)*pobject;
40 bar->iomap_uncached = true;
41
42 return 0;
43}
44
45struct nouveau_oclass
46gk20a_bar_oclass = {
47 .handle = NV_SUBDEV(BAR, 0xea),
48 .ofuncs = &(struct nouveau_ofuncs) {
49 .ctor = gk20a_bar_ctor,
50 .dtor = nvc0_bar_dtor,
51 .init = nvc0_bar_init,
52 .fini = _nouveau_bar_fini,
53 },
54};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index ca8139b9ab27..0a44459844e3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -133,7 +133,7 @@ nvc0_bar_init_vm(struct nvc0_bar_priv *priv, struct nvc0_bar_priv_vm *bar_vm,
133 return 0; 133 return 0;
134} 134}
135 135
136static int 136int
137nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 137nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
138 struct nouveau_oclass *oclass, void *data, u32 size, 138 struct nouveau_oclass *oclass, void *data, u32 size,
139 struct nouveau_object **pobject) 139 struct nouveau_object **pobject)
@@ -169,7 +169,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
169 return 0; 169 return 0;
170} 170}
171 171
172static void 172void
173nvc0_bar_dtor(struct nouveau_object *object) 173nvc0_bar_dtor(struct nouveau_object *object)
174{ 174{
175 struct nvc0_bar_priv *priv = (void *)object; 175 struct nvc0_bar_priv *priv = (void *)object;
@@ -188,7 +188,7 @@ nvc0_bar_dtor(struct nouveau_object *object)
188 nouveau_bar_destroy(&priv->base); 188 nouveau_bar_destroy(&priv->base);
189} 189}
190 190
191static int 191int
192nvc0_bar_init(struct nouveau_object *object) 192nvc0_bar_init(struct nouveau_object *object)
193{ 193{
194 struct nvc0_bar_priv *priv = (void *)object; 194 struct nvc0_bar_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
index ffad8f337ead..3ee8b1476d00 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
@@ -23,4 +23,10 @@ int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
23 23
24void nv84_bar_flush(struct nouveau_bar *); 24void nv84_bar_flush(struct nouveau_bar *);
25 25
26int nvc0_bar_ctor(struct nouveau_object *, struct nouveau_object *,
27 struct nouveau_oclass *, void *, u32,
28 struct nouveau_object **);
29void nvc0_bar_dtor(struct nouveau_object *);
30int nvc0_bar_init(struct nouveau_object *);
31
26#endif 32#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
index 22351f594d2a..a276a711294a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -90,16 +90,20 @@ nouveau_cstate_prog(struct nouveau_clock *clk,
90 cstate = &pstate->base; 90 cstate = &pstate->base;
91 } 91 }
92 92
93 ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1); 93 if (ptherm) {
94 if (ret && ret != -ENODEV) { 94 ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
95 nv_error(clk, "failed to raise fan speed: %d\n", ret); 95 if (ret && ret != -ENODEV) {
96 return ret; 96 nv_error(clk, "failed to raise fan speed: %d\n", ret);
97 return ret;
98 }
97 } 99 }
98 100
99 ret = volt->set_id(volt, cstate->voltage, +1); 101 if (volt) {
100 if (ret && ret != -ENODEV) { 102 ret = volt->set_id(volt, cstate->voltage, +1);
101 nv_error(clk, "failed to raise voltage: %d\n", ret); 103 if (ret && ret != -ENODEV) {
102 return ret; 104 nv_error(clk, "failed to raise voltage: %d\n", ret);
105 return ret;
106 }
103 } 107 }
104 108
105 ret = clk->calc(clk, cstate); 109 ret = clk->calc(clk, cstate);
@@ -108,13 +112,17 @@ nouveau_cstate_prog(struct nouveau_clock *clk,
108 clk->tidy(clk); 112 clk->tidy(clk);
109 } 113 }
110 114
111 ret = volt->set_id(volt, cstate->voltage, -1); 115 if (volt) {
112 if (ret && ret != -ENODEV) 116 ret = volt->set_id(volt, cstate->voltage, -1);
113 nv_error(clk, "failed to lower voltage: %d\n", ret); 117 if (ret && ret != -ENODEV)
118 nv_error(clk, "failed to lower voltage: %d\n", ret);
119 }
114 120
115 ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1); 121 if (ptherm) {
116 if (ret && ret != -ENODEV) 122 ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
117 nv_error(clk, "failed to lower fan speed: %d\n", ret); 123 if (ret && ret != -ENODEV)
124 nv_error(clk, "failed to lower fan speed: %d\n", ret);
125 }
118 126
119 return 0; 127 return 0;
120} 128}
@@ -194,16 +202,23 @@ nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
194 return nouveau_cstate_prog(clk, pstate, 0); 202 return nouveau_cstate_prog(clk, pstate, 0);
195} 203}
196 204
197static int 205static void
198nouveau_pstate_calc(struct nouveau_clock *clk) 206nouveau_pstate_work(struct work_struct *work)
199{ 207{
200 int pstate, ret = 0; 208 struct nouveau_clock *clk = container_of(work, typeof(*clk), work);
209 int pstate;
201 210
202 nv_trace(clk, "P %d U %d A %d T %d D %d\n", clk->pstate, 211 if (!atomic_xchg(&clk->waiting, 0))
203 clk->ustate, clk->astate, clk->tstate, clk->dstate); 212 return;
213 clk->pwrsrc = power_supply_is_system_supplied();
204 214
205 if (clk->state_nr && clk->ustate != -1) { 215 nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
206 pstate = (clk->ustate < 0) ? clk->astate : clk->ustate; 216 clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
217 clk->astate, clk->tstate, clk->dstate);
218
219 pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
220 if (clk->state_nr && pstate != -1) {
221 pstate = (pstate < 0) ? clk->astate : pstate;
207 pstate = min(pstate, clk->state_nr - 1 - clk->tstate); 222 pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
208 pstate = max(pstate, clk->dstate); 223 pstate = max(pstate, clk->dstate);
209 } else { 224 } else {
@@ -211,9 +226,26 @@ nouveau_pstate_calc(struct nouveau_clock *clk)
211 } 226 }
212 227
213 nv_trace(clk, "-> %d\n", pstate); 228 nv_trace(clk, "-> %d\n", pstate);
214 if (pstate != clk->pstate) 229 if (pstate != clk->pstate) {
215 ret = nouveau_pstate_prog(clk, pstate); 230 int ret = nouveau_pstate_prog(clk, pstate);
216 return ret; 231 if (ret) {
232 nv_error(clk, "error setting pstate %d: %d\n",
233 pstate, ret);
234 }
235 }
236
237 wake_up_all(&clk->wait);
238 nvkm_notify_get(&clk->pwrsrc_ntfy);
239}
240
241static int
242nouveau_pstate_calc(struct nouveau_clock *clk, bool wait)
243{
244 atomic_set(&clk->waiting, 1);
245 schedule_work(&clk->work);
246 if (wait)
247 wait_event(clk->wait, !atomic_read(&clk->waiting));
248 return 0;
217} 249}
218 250
219static void 251static void
@@ -361,17 +393,40 @@ nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
361 req = i; 393 req = i;
362 } 394 }
363 395
364 clk->ustate = req; 396 return req + 2;
365 return 0; 397}
398
399static int
400nouveau_clock_nstate(struct nouveau_clock *clk, const char *mode, int arglen)
401{
402 int ret = 1;
403
404 if (strncasecmpz(mode, "disabled", arglen)) {
405 char save = mode[arglen];
406 long v;
407
408 ((char *)mode)[arglen] = '\0';
409 if (!kstrtol(mode, 0, &v)) {
410 ret = nouveau_clock_ustate_update(clk, v);
411 if (ret < 0)
412 ret = 1;
413 }
414 ((char *)mode)[arglen] = save;
415 }
416
417 return ret - 2;
366} 418}
367 419
368int 420int
369nouveau_clock_ustate(struct nouveau_clock *clk, int req) 421nouveau_clock_ustate(struct nouveau_clock *clk, int req, int pwr)
370{ 422{
371 int ret = nouveau_clock_ustate_update(clk, req); 423 int ret = nouveau_clock_ustate_update(clk, req);
372 if (ret) 424 if (ret >= 0) {
373 return ret; 425 if (ret -= 2, pwr) clk->ustate_ac = ret;
374 return nouveau_pstate_calc(clk); 426 else clk->ustate_dc = ret;
427 return nouveau_pstate_calc(clk, true);
428 }
429 return ret;
375} 430}
376 431
377int 432int
@@ -381,7 +436,7 @@ nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel)
381 if ( rel) clk->astate += rel; 436 if ( rel) clk->astate += rel;
382 clk->astate = min(clk->astate, clk->state_nr - 1); 437 clk->astate = min(clk->astate, clk->state_nr - 1);
383 clk->astate = max(clk->astate, 0); 438 clk->astate = max(clk->astate, 0);
384 return nouveau_pstate_calc(clk); 439 return nouveau_pstate_calc(clk, true);
385} 440}
386 441
387int 442int
@@ -391,7 +446,7 @@ nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel)
391 if ( rel) clk->tstate += rel; 446 if ( rel) clk->tstate += rel;
392 clk->tstate = min(clk->tstate, 0); 447 clk->tstate = min(clk->tstate, 0);
393 clk->tstate = max(clk->tstate, -(clk->state_nr - 1)); 448 clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
394 return nouveau_pstate_calc(clk); 449 return nouveau_pstate_calc(clk, true);
395} 450}
396 451
397int 452int
@@ -401,12 +456,30 @@ nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel)
401 if ( rel) clk->dstate += rel; 456 if ( rel) clk->dstate += rel;
402 clk->dstate = min(clk->dstate, clk->state_nr - 1); 457 clk->dstate = min(clk->dstate, clk->state_nr - 1);
403 clk->dstate = max(clk->dstate, 0); 458 clk->dstate = max(clk->dstate, 0);
404 return nouveau_pstate_calc(clk); 459 return nouveau_pstate_calc(clk, true);
460}
461
462static int
463nouveau_clock_pwrsrc(struct nvkm_notify *notify)
464{
465 struct nouveau_clock *clk =
466 container_of(notify, typeof(*clk), pwrsrc_ntfy);
467 nouveau_pstate_calc(clk, false);
468 return NVKM_NOTIFY_DROP;
405} 469}
406 470
407/****************************************************************************** 471/******************************************************************************
408 * subdev base class implementation 472 * subdev base class implementation
409 *****************************************************************************/ 473 *****************************************************************************/
474
475int
476_nouveau_clock_fini(struct nouveau_object *object, bool suspend)
477{
478 struct nouveau_clock *clk = (void *)object;
479 nvkm_notify_put(&clk->pwrsrc_ntfy);
480 return nouveau_subdev_fini(&clk->base, suspend);
481}
482
410int 483int
411_nouveau_clock_init(struct nouveau_object *object) 484_nouveau_clock_init(struct nouveau_object *object)
412{ 485{
@@ -414,6 +487,10 @@ _nouveau_clock_init(struct nouveau_object *object)
414 struct nouveau_clocks *clock = clk->domains; 487 struct nouveau_clocks *clock = clk->domains;
415 int ret; 488 int ret;
416 489
490 ret = nouveau_subdev_init(&clk->base);
491 if (ret)
492 return ret;
493
417 memset(&clk->bstate, 0x00, sizeof(clk->bstate)); 494 memset(&clk->bstate, 0x00, sizeof(clk->bstate));
418 INIT_LIST_HEAD(&clk->bstate.list); 495 INIT_LIST_HEAD(&clk->bstate.list);
419 clk->bstate.pstate = 0xff; 496 clk->bstate.pstate = 0xff;
@@ -434,7 +511,7 @@ _nouveau_clock_init(struct nouveau_object *object)
434 clk->tstate = 0; 511 clk->tstate = 0;
435 clk->dstate = 0; 512 clk->dstate = 0;
436 clk->pstate = -1; 513 clk->pstate = -1;
437 nouveau_pstate_calc(clk); 514 nouveau_pstate_calc(clk, true);
438 return 0; 515 return 0;
439} 516}
440 517
@@ -444,6 +521,8 @@ _nouveau_clock_dtor(struct nouveau_object *object)
444 struct nouveau_clock *clk = (void *)object; 521 struct nouveau_clock *clk = (void *)object;
445 struct nouveau_pstate *pstate, *temp; 522 struct nouveau_pstate *pstate, *temp;
446 523
524 nvkm_notify_fini(&clk->pwrsrc_ntfy);
525
447 list_for_each_entry_safe(pstate, temp, &clk->states, head) { 526 list_for_each_entry_safe(pstate, temp, &clk->states, head) {
448 nouveau_pstate_del(pstate); 527 nouveau_pstate_del(pstate);
449 } 528 }
@@ -456,6 +535,7 @@ nouveau_clock_create_(struct nouveau_object *parent,
456 struct nouveau_object *engine, 535 struct nouveau_object *engine,
457 struct nouveau_oclass *oclass, 536 struct nouveau_oclass *oclass,
458 struct nouveau_clocks *clocks, 537 struct nouveau_clocks *clocks,
538 struct nouveau_pstate *pstates, int nb_pstates,
459 bool allow_reclock, 539 bool allow_reclock,
460 int length, void **object) 540 int length, void **object)
461{ 541{
@@ -472,29 +552,46 @@ nouveau_clock_create_(struct nouveau_object *parent,
472 552
473 INIT_LIST_HEAD(&clk->states); 553 INIT_LIST_HEAD(&clk->states);
474 clk->domains = clocks; 554 clk->domains = clocks;
475 clk->ustate = -1; 555 clk->ustate_ac = -1;
556 clk->ustate_dc = -1;
557
558 INIT_WORK(&clk->work, nouveau_pstate_work);
559 init_waitqueue_head(&clk->wait);
560 atomic_set(&clk->waiting, 0);
476 561
477 idx = 0; 562 /* If no pstates are provided, try and fetch them from the BIOS */
478 do { 563 if (!pstates) {
479 ret = nouveau_pstate_new(clk, idx++); 564 idx = 0;
480 } while (ret == 0); 565 do {
566 ret = nouveau_pstate_new(clk, idx++);
567 } while (ret == 0);
568 } else {
569 for (idx = 0; idx < nb_pstates; idx++)
570 list_add_tail(&pstates[idx].head, &clk->states);
571 clk->state_nr = nb_pstates;
572 }
481 573
482 clk->allow_reclock = allow_reclock; 574 clk->allow_reclock = allow_reclock;
483 575
576 ret = nvkm_notify_init(&device->event, nouveau_clock_pwrsrc, true,
577 NULL, 0, 0, &clk->pwrsrc_ntfy);
578 if (ret)
579 return ret;
580
484 mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen); 581 mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
485 if (mode) { 582 if (mode) {
486 if (!strncasecmpz(mode, "disabled", arglen)) { 583 clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen);
487 clk->ustate = -1; 584 clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen);
488 } else {
489 char save = mode[arglen];
490 long v;
491
492 ((char *)mode)[arglen] = '\0';
493 if (!kstrtol(mode, 0, &v))
494 nouveau_clock_ustate_update(clk, v);
495 ((char *)mode)[arglen] = save;
496 }
497 } 585 }
498 586
587 mode = nouveau_stropt(device->cfgopt, "NvClkModeAC", &arglen);
588 if (mode)
589 clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen);
590
591 mode = nouveau_stropt(device->cfgopt, "NvClkModeDC", &arglen);
592 if (mode)
593 clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen);
594
595
499 return 0; 596 return 0;
500} 597}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
new file mode 100644
index 000000000000..425a8d5e9129
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
@@ -0,0 +1,665 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 *
22 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
23 *
24 */
25
26#define MHZ (1000 * 1000)
27
28#define MASK(w) ((1 << w) - 1)
29
30#define SYS_GPCPLL_CFG_BASE 0x00137000
31#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800
32
33#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
34#define GPCPLL_CFG_ENABLE BIT(0)
35#define GPCPLL_CFG_IDDQ BIT(1)
36#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
37#define GPCPLL_CFG_LOCK BIT(17)
38
39#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
40#define GPCPLL_COEFF_M_SHIFT 0
41#define GPCPLL_COEFF_M_WIDTH 8
42#define GPCPLL_COEFF_N_SHIFT 8
43#define GPCPLL_COEFF_N_WIDTH 8
44#define GPCPLL_COEFF_P_SHIFT 16
45#define GPCPLL_COEFF_P_WIDTH 6
46
47#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
48#define GPCPLL_CFG2_SETUP2_SHIFT 16
49#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
50
51#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
52#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
53
54#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
55#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
56#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
57#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
58#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
59#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
60
61#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
62#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
63
64#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
65#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
66#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
67#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
68#define GPC2CLK_OUT_VCODIV_WIDTH 6
69#define GPC2CLK_OUT_VCODIV_SHIFT 8
70#define GPC2CLK_OUT_VCODIV1 0
71#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
72 GPC2CLK_OUT_VCODIV_SHIFT)
73#define GPC2CLK_OUT_BYPDIV_WIDTH 6
74#define GPC2CLK_OUT_BYPDIV_SHIFT 0
75#define GPC2CLK_OUT_BYPDIV31 0x3c
76#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
77 GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
78 | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
79 | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
80#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
81 GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
82 | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
83 | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
84
85#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
86#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
87#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
88 (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
89
90#include <subdev/clock.h>
91#include <subdev/timer.h>
92
93#ifdef __KERNEL__
94#include <nouveau_platform.h>
95#endif
96
97static const u8 pl_to_div[] = {
98/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
99/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
100};
101
102/* All frequencies in Mhz */
103struct gk20a_clk_pllg_params {
104 u32 min_vco, max_vco;
105 u32 min_u, max_u;
106 u32 min_m, max_m;
107 u32 min_n, max_n;
108 u32 min_pl, max_pl;
109};
110
111static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
112 .min_vco = 1000, .max_vco = 1700,
113 .min_u = 12, .max_u = 38,
114 .min_m = 1, .max_m = 255,
115 .min_n = 8, .max_n = 255,
116 .min_pl = 1, .max_pl = 32,
117};
118
119struct gk20a_clock_priv {
120 struct nouveau_clock base;
121 const struct gk20a_clk_pllg_params *params;
122 u32 m, n, pl;
123 u32 parent_rate;
124};
125#define to_gk20a_clock(base) container_of(base, struct gk20a_clock_priv, base)
126
127static void
128gk20a_pllg_read_mnp(struct gk20a_clock_priv *priv)
129{
130 u32 val;
131
132 val = nv_rd32(priv, GPCPLL_COEFF);
133 priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
134 priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
135 priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
136}
137
138static u32
139gk20a_pllg_calc_rate(struct gk20a_clock_priv *priv)
140{
141 u32 rate;
142 u32 divider;
143
144 rate = priv->parent_rate * priv->n;
145 divider = priv->m * pl_to_div[priv->pl];
146 do_div(rate, divider);
147
148 return rate / 2;
149}
150
151static int
152gk20a_pllg_calc_mnp(struct gk20a_clock_priv *priv, unsigned long rate)
153{
154 u32 target_clk_f, ref_clk_f, target_freq;
155 u32 min_vco_f, max_vco_f;
156 u32 low_pl, high_pl, best_pl;
157 u32 target_vco_f, vco_f;
158 u32 best_m, best_n;
159 u32 u_f;
160 u32 m, n, n2;
161 u32 delta, lwv, best_delta = ~0;
162 u32 pl;
163
164 target_clk_f = rate * 2 / MHZ;
165 ref_clk_f = priv->parent_rate / MHZ;
166
167 max_vco_f = priv->params->max_vco;
168 min_vco_f = priv->params->min_vco;
169 best_m = priv->params->max_m;
170 best_n = priv->params->min_n;
171 best_pl = priv->params->min_pl;
172
173 target_vco_f = target_clk_f + target_clk_f / 50;
174 if (max_vco_f < target_vco_f)
175 max_vco_f = target_vco_f;
176
177 /* min_pl <= high_pl <= max_pl */
178 high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
179 high_pl = min(high_pl, priv->params->max_pl);
180 high_pl = max(high_pl, priv->params->min_pl);
181
182 /* min_pl <= low_pl <= max_pl */
183 low_pl = min_vco_f / target_vco_f;
184 low_pl = min(low_pl, priv->params->max_pl);
185 low_pl = max(low_pl, priv->params->min_pl);
186
187 /* Find Indices of high_pl and low_pl */
188 for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
189 if (pl_to_div[pl] >= low_pl) {
190 low_pl = pl;
191 break;
192 }
193 }
194 for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
195 if (pl_to_div[pl] >= high_pl) {
196 high_pl = pl;
197 break;
198 }
199 }
200
201 nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
202 pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
203
204 /* Select lowest possible VCO */
205 for (pl = low_pl; pl <= high_pl; pl++) {
206 target_vco_f = target_clk_f * pl_to_div[pl];
207 for (m = priv->params->min_m; m <= priv->params->max_m; m++) {
208 u_f = ref_clk_f / m;
209
210 if (u_f < priv->params->min_u)
211 break;
212 if (u_f > priv->params->max_u)
213 continue;
214
215 n = (target_vco_f * m) / ref_clk_f;
216 n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
217
218 if (n > priv->params->max_n)
219 break;
220
221 for (; n <= n2; n++) {
222 if (n < priv->params->min_n)
223 continue;
224 if (n > priv->params->max_n)
225 break;
226
227 vco_f = ref_clk_f * n / m;
228
229 if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
230 lwv = (vco_f + (pl_to_div[pl] / 2))
231 / pl_to_div[pl];
232 delta = abs(lwv - target_clk_f);
233
234 if (delta < best_delta) {
235 best_delta = delta;
236 best_m = m;
237 best_n = n;
238 best_pl = pl;
239
240 if (best_delta == 0)
241 goto found_match;
242 }
243 }
244 }
245 }
246 }
247
248found_match:
249 WARN_ON(best_delta == ~0);
250
251 if (best_delta != 0)
252 nv_debug(priv, "no best match for target @ %dMHz on gpc_pll",
253 target_clk_f);
254
255 priv->m = best_m;
256 priv->n = best_n;
257 priv->pl = best_pl;
258
259 target_freq = gk20a_pllg_calc_rate(priv) / MHZ;
260
261 nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
262 target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]);
263
264 return 0;
265}
266
267static int
268gk20a_pllg_slide(struct gk20a_clock_priv *priv, u32 n)
269{
270 u32 val;
271 int ramp_timeout;
272
273 /* get old coefficients */
274 val = nv_rd32(priv, GPCPLL_COEFF);
275 /* do nothing if NDIV is the same */
276 if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
277 return 0;
278
279 /* setup */
280 nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
281 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
282 nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
283 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
284
285 /* pll slowdown mode */
286 nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
287 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
288 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
289
290 /* new ndiv ready for ramp */
291 val = nv_rd32(priv, GPCPLL_COEFF);
292 val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
293 val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
294 udelay(1);
295 nv_wr32(priv, GPCPLL_COEFF, val);
296
297 /* dynamic ramp to new ndiv */
298 val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
299 val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
300 udelay(1);
301 nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val);
302
303 for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
304 udelay(1);
305 val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
306 if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
307 break;
308 }
309
310 /* exit slowdown mode */
311 nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
312 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
313 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
314 nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
315
316 if (ramp_timeout <= 0) {
317 nv_error(priv, "gpcpll dynamic ramp timeout\n");
318 return -ETIMEDOUT;
319 }
320
321 return 0;
322}
323
324static void
325_gk20a_pllg_enable(struct gk20a_clock_priv *priv)
326{
327 nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
328 nv_rd32(priv, GPCPLL_CFG);
329}
330
331static void
332_gk20a_pllg_disable(struct gk20a_clock_priv *priv)
333{
334 nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
335 nv_rd32(priv, GPCPLL_CFG);
336}
337
338static int
339_gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv, bool allow_slide)
340{
341 u32 val, cfg;
342 u32 m_old, pl_old, n_lo;
343
344 /* get old coefficients */
345 val = nv_rd32(priv, GPCPLL_COEFF);
346 m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
347 pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
348
349 /* do NDIV slide if there is no change in M and PL */
350 cfg = nv_rd32(priv, GPCPLL_CFG);
351 if (allow_slide && priv->m == m_old && priv->pl == pl_old &&
352 (cfg & GPCPLL_CFG_ENABLE)) {
353 return gk20a_pllg_slide(priv, priv->n);
354 }
355
356 /* slide down to NDIV_LO */
357 n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco,
358 priv->parent_rate / MHZ);
359 if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
360 int ret = gk20a_pllg_slide(priv, n_lo);
361
362 if (ret)
363 return ret;
364 }
365
366 /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
367 nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
368 0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
369
370 /* put PLL in bypass before programming it */
371 val = nv_rd32(priv, SEL_VCO);
372 val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
373 udelay(2);
374 nv_wr32(priv, SEL_VCO, val);
375
376 /* get out from IDDQ */
377 val = nv_rd32(priv, GPCPLL_CFG);
378 if (val & GPCPLL_CFG_IDDQ) {
379 val &= ~GPCPLL_CFG_IDDQ;
380 nv_wr32(priv, GPCPLL_CFG, val);
381 nv_rd32(priv, GPCPLL_CFG);
382 udelay(2);
383 }
384
385 _gk20a_pllg_disable(priv);
386
387 nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n,
388 priv->pl);
389
390 n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco,
391 priv->parent_rate / MHZ);
392 val = priv->m << GPCPLL_COEFF_M_SHIFT;
393 val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT;
394 val |= priv->pl << GPCPLL_COEFF_P_SHIFT;
395 nv_wr32(priv, GPCPLL_COEFF, val);
396
397 _gk20a_pllg_enable(priv);
398
399 val = nv_rd32(priv, GPCPLL_CFG);
400 if (val & GPCPLL_CFG_LOCK_DET_OFF) {
401 val &= ~GPCPLL_CFG_LOCK_DET_OFF;
402 nv_wr32(priv, GPCPLL_CFG, val);
403 }
404
405 if (!nouveau_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
406 GPCPLL_CFG_LOCK)) {
407 nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__);
408 return -ETIMEDOUT;
409 }
410
411 /* switch to VCO mode */
412 nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
413
414 /* restore out divider 1:1 */
415 val = nv_rd32(priv, GPC2CLK_OUT);
416 val &= ~GPC2CLK_OUT_VCODIV_MASK;
417 udelay(2);
418 nv_wr32(priv, GPC2CLK_OUT, val);
419
420 /* slide up to new NDIV */
421 return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0;
422}
423
424static int
425gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv)
426{
427 int err;
428
429 err = _gk20a_pllg_program_mnp(priv, true);
430 if (err)
431 err = _gk20a_pllg_program_mnp(priv, false);
432
433 return err;
434}
435
436static void
437gk20a_pllg_disable(struct gk20a_clock_priv *priv)
438{
439 u32 val;
440
441 /* slide to VCO min */
442 val = nv_rd32(priv, GPCPLL_CFG);
443 if (val & GPCPLL_CFG_ENABLE) {
444 u32 coeff, m, n_lo;
445
446 coeff = nv_rd32(priv, GPCPLL_COEFF);
447 m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
448 n_lo = DIV_ROUND_UP(m * priv->params->min_vco,
449 priv->parent_rate / MHZ);
450 gk20a_pllg_slide(priv, n_lo);
451 }
452
453 /* put PLL in bypass before disabling it */
454 nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
455
456 _gk20a_pllg_disable(priv);
457}
458
459#define GK20A_CLK_GPC_MDIV 1000
460
461static struct nouveau_clocks
462gk20a_domains[] = {
463 { nv_clk_src_crystal, 0xff },
464 { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
465 { nv_clk_src_max }
466};
467
468static struct nouveau_pstate
469gk20a_pstates[] = {
470 {
471 .base = {
472 .domain[nv_clk_src_gpc] = 72000,
473 },
474 },
475 {
476 .base = {
477 .domain[nv_clk_src_gpc] = 108000,
478 },
479 },
480 {
481 .base = {
482 .domain[nv_clk_src_gpc] = 180000,
483 },
484 },
485 {
486 .base = {
487 .domain[nv_clk_src_gpc] = 252000,
488 },
489 },
490 {
491 .base = {
492 .domain[nv_clk_src_gpc] = 324000,
493 },
494 },
495 {
496 .base = {
497 .domain[nv_clk_src_gpc] = 396000,
498 },
499 },
500 {
501 .base = {
502 .domain[nv_clk_src_gpc] = 468000,
503 },
504 },
505 {
506 .base = {
507 .domain[nv_clk_src_gpc] = 540000,
508 },
509 },
510 {
511 .base = {
512 .domain[nv_clk_src_gpc] = 612000,
513 },
514 },
515 {
516 .base = {
517 .domain[nv_clk_src_gpc] = 648000,
518 },
519 },
520 {
521 .base = {
522 .domain[nv_clk_src_gpc] = 684000,
523 },
524 },
525 {
526 .base = {
527 .domain[nv_clk_src_gpc] = 708000,
528 },
529 },
530 {
531 .base = {
532 .domain[nv_clk_src_gpc] = 756000,
533 },
534 },
535 {
536 .base = {
537 .domain[nv_clk_src_gpc] = 804000,
538 },
539 },
540 {
541 .base = {
542 .domain[nv_clk_src_gpc] = 852000,
543 },
544 },
545};
546
547static int
548gk20a_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
549{
550 struct gk20a_clock_priv *priv = (void *)clk;
551
552 switch (src) {
553 case nv_clk_src_crystal:
554 return nv_device(clk)->crystal;
555 case nv_clk_src_gpc:
556 gk20a_pllg_read_mnp(priv);
557 return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV;
558 default:
559 nv_error(clk, "invalid clock source %d\n", src);
560 return -EINVAL;
561 }
562}
563
564static int
565gk20a_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
566{
567 struct gk20a_clock_priv *priv = (void *)clk;
568
569 return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] *
570 GK20A_CLK_GPC_MDIV);
571}
572
573static int
574gk20a_clock_prog(struct nouveau_clock *clk)
575{
576 struct gk20a_clock_priv *priv = (void *)clk;
577
578 return gk20a_pllg_program_mnp(priv);
579}
580
581static void
582gk20a_clock_tidy(struct nouveau_clock *clk)
583{
584}
585
586static int
587gk20a_clock_fini(struct nouveau_object *object, bool suspend)
588{
589 struct gk20a_clock_priv *priv = (void *)object;
590 int ret;
591
592 ret = nouveau_clock_fini(&priv->base, false);
593
594 gk20a_pllg_disable(priv);
595
596 return ret;
597}
598
599static int
600gk20a_clock_init(struct nouveau_object *object)
601{
602 struct gk20a_clock_priv *priv = (void *)object;
603 int ret;
604
605 nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
606
607 ret = nouveau_clock_init(&priv->base);
608 if (ret)
609 return ret;
610
611 ret = gk20a_clock_prog(&priv->base);
612 if (ret) {
613 nv_error(priv, "cannot initialize clock\n");
614 return ret;
615 }
616
617 return 0;
618}
619
620static int
621gk20a_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
622 struct nouveau_oclass *oclass, void *data, u32 size,
623 struct nouveau_object **pobject)
624{
625 struct gk20a_clock_priv *priv;
626 struct nouveau_platform_device *plat;
627 int ret;
628 int i;
629
630 /* Finish initializing the pstates */
631 for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
632 INIT_LIST_HEAD(&gk20a_pstates[i].list);
633 gk20a_pstates[i].pstate = i + 1;
634 }
635
636 ret = nouveau_clock_create(parent, engine, oclass, gk20a_domains,
637 gk20a_pstates, ARRAY_SIZE(gk20a_pstates), true, &priv);
638 *pobject = nv_object(priv);
639 if (ret)
640 return ret;
641
642 priv->params = &gk20a_pllg_params;
643
644 plat = nv_device_to_platform(nv_device(parent));
645 priv->parent_rate = clk_get_rate(plat->gpu->clk);
646 nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ);
647
648 priv->base.read = gk20a_clock_read;
649 priv->base.calc = gk20a_clock_calc;
650 priv->base.prog = gk20a_clock_prog;
651 priv->base.tidy = gk20a_clock_tidy;
652
653 return 0;
654}
655
656struct nouveau_oclass
657gk20a_clock_oclass = {
658 .handle = NV_SUBDEV(CLOCK, 0xea),
659 .ofuncs = &(struct nouveau_ofuncs) {
660 .ctor = gk20a_clock_ctor,
661 .dtor = _nouveau_subdev_dtor,
662 .init = gk20a_clock_init,
663 .fini = gk20a_clock_fini,
664 },
665};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index eb2d4425a49e..4c48232686be 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -82,8 +82,8 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
82 struct nv04_clock_priv *priv; 82 struct nv04_clock_priv *priv;
83 int ret; 83 int ret;
84 84
85 ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, false, 85 ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, NULL, 0,
86 &priv); 86 false, &priv);
87 *pobject = nv_object(priv); 87 *pobject = nv_object(priv);
88 if (ret) 88 if (ret)
89 return ret; 89 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index 8a9e16839791..08368fe97029 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -213,8 +213,8 @@ nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
213 struct nv40_clock_priv *priv; 213 struct nv40_clock_priv *priv;
214 int ret; 214 int ret;
215 215
216 ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, true, 216 ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, NULL, 0,
217 &priv); 217 true, &priv);
218 *pobject = nv_object(priv); 218 *pobject = nv_object(priv);
219 if (ret) 219 if (ret)
220 return ret; 220 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index 8c132772ba9e..5070ebc260f8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -507,7 +507,7 @@ nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
507 int ret; 507 int ret;
508 508
509 ret = nouveau_clock_create(parent, engine, oclass, pclass->domains, 509 ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
510 false, &priv); 510 NULL, 0, false, &priv);
511 *pobject = nv_object(priv); 511 *pobject = nv_object(priv);
512 if (ret) 512 if (ret)
513 return ret; 513 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index 9fb58354a80b..087012b18956 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -302,8 +302,8 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
302 struct nva3_clock_priv *priv; 302 struct nva3_clock_priv *priv;
303 int ret; 303 int ret;
304 304
305 ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, false, 305 ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, NULL, 0,
306 &priv); 306 false, &priv);
307 *pobject = nv_object(priv); 307 *pobject = nv_object(priv);
308 if (ret) 308 if (ret)
309 return ret; 309 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
index 6a65fc9e9663..74e19731b1b7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -421,8 +421,8 @@ nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
421 struct nvaa_clock_priv *priv; 421 struct nvaa_clock_priv *priv;
422 int ret; 422 int ret;
423 423
424 ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, true, 424 ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, NULL,
425 &priv); 425 0, true, &priv);
426 *pobject = nv_object(priv); 426 *pobject = nv_object(priv);
427 if (ret) 427 if (ret)
428 return ret; 428 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index dbf8517f54da..1234abaab2db 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -437,8 +437,8 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
437 struct nvc0_clock_priv *priv; 437 struct nvc0_clock_priv *priv;
438 int ret; 438 int ret;
439 439
440 ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, false, 440 ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, NULL, 0,
441 &priv); 441 false, &priv);
442 *pobject = nv_object(priv); 442 *pobject = nv_object(priv);
443 if (ret) 443 if (ret)
444 return ret; 444 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
index 0e62a3240144..7eccad57512e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -475,8 +475,8 @@ nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
475 struct nve0_clock_priv *priv; 475 struct nve0_clock_priv *priv;
476 int ret; 476 int ret;
477 477
478 ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, true, 478 ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, NULL, 0,
479 &priv); 479 true, &priv);
480 *pobject = nv_object(priv); 480 *pobject = nv_object(priv);
481 if (ret) 481 if (ret)
482 return ret; 482 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 1fc55c1e91a1..4150b0d10af8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -250,9 +250,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
250 250
251 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 251 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
252 if (priv->r100c08_page) { 252 if (priv->r100c08_page) {
253 priv->r100c08 = nv_device_map_page(device, priv->r100c08_page); 253 priv->r100c08 = dma_map_page(nv_device_base(device),
254 if (!priv->r100c08) 254 priv->r100c08_page, 0, PAGE_SIZE,
255 nv_warn(priv, "failed 0x100c08 page map\n"); 255 DMA_BIDIRECTIONAL);
256 if (dma_mapping_error(nv_device_base(device), priv->r100c08))
257 return -EFAULT;
256 } else { 258 } else {
257 nv_warn(priv, "failed 0x100c08 page alloc\n"); 259 nv_warn(priv, "failed 0x100c08 page alloc\n");
258 } 260 }
@@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object)
268 struct nv50_fb_priv *priv = (void *)object; 270 struct nv50_fb_priv *priv = (void *)object;
269 271
270 if (priv->r100c08_page) { 272 if (priv->r100c08_page) {
271 nv_device_unmap_page(device, priv->r100c08); 273 dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
274 DMA_BIDIRECTIONAL);
272 __free_page(priv->r100c08_page); 275 __free_page(priv->r100c08_page);
273 } 276 }
274 277
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 0670ae33ee45..b19a2b3c1081 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -70,7 +70,8 @@ nvc0_fb_dtor(struct nouveau_object *object)
70 struct nvc0_fb_priv *priv = (void *)object; 70 struct nvc0_fb_priv *priv = (void *)object;
71 71
72 if (priv->r100c10_page) { 72 if (priv->r100c10_page) {
73 nv_device_unmap_page(device, priv->r100c10); 73 dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
74 DMA_BIDIRECTIONAL);
74 __free_page(priv->r100c10_page); 75 __free_page(priv->r100c10_page);
75 } 76 }
76 77
@@ -93,8 +94,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
93 94
94 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 95 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
95 if (priv->r100c10_page) { 96 if (priv->r100c10_page) {
96 priv->r100c10 = nv_device_map_page(device, priv->r100c10_page); 97 priv->r100c10 = dma_map_page(nv_device_base(device),
97 if (!priv->r100c10) 98 priv->r100c10_page, 0, PAGE_SIZE,
99 DMA_BIDIRECTIONAL);
100 if (dma_mapping_error(nv_device_base(device), priv->r100c10))
98 return -EFAULT; 101 return -EFAULT;
99 } 102 }
100 103
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 5a6a5027f749..946518572346 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -26,7 +26,7 @@
26#include <subdev/bios/pll.h> 26#include <subdev/bios/pll.h>
27#include <subdev/bios/rammap.h> 27#include <subdev/bios/rammap.h>
28#include <subdev/bios/timing.h> 28#include <subdev/bios/timing.h>
29#include <subdev/ltcg.h> 29#include <subdev/ltc.h>
30 30
31#include <subdev/clock.h> 31#include <subdev/clock.h>
32#include <subdev/clock/pll.h> 32#include <subdev/clock/pll.h>
@@ -425,7 +425,7 @@ extern const u8 nvc0_pte_storage_type_map[256];
425void 425void
426nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) 426nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
427{ 427{
428 struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); 428 struct nouveau_ltc *ltc = nouveau_ltc(pfb);
429 struct nouveau_mem *mem = *pmem; 429 struct nouveau_mem *mem = *pmem;
430 430
431 *pmem = NULL; 431 *pmem = NULL;
@@ -434,7 +434,7 @@ nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
434 434
435 mutex_lock(&pfb->base.mutex); 435 mutex_lock(&pfb->base.mutex);
436 if (mem->tag) 436 if (mem->tag)
437 ltcg->tags_free(ltcg, &mem->tag); 437 ltc->tags_free(ltc, &mem->tag);
438 __nv50_ram_put(pfb, mem); 438 __nv50_ram_put(pfb, mem);
439 mutex_unlock(&pfb->base.mutex); 439 mutex_unlock(&pfb->base.mutex);
440 440
@@ -468,12 +468,12 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
468 468
469 mutex_lock(&pfb->base.mutex); 469 mutex_lock(&pfb->base.mutex);
470 if (comp) { 470 if (comp) {
471 struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); 471 struct nouveau_ltc *ltc = nouveau_ltc(pfb);
472 472
473 /* compression only works with lpages */ 473 /* compression only works with lpages */
474 if (align == (1 << (17 - 12))) { 474 if (align == (1 << (17 - 12))) {
475 int n = size >> 5; 475 int n = size >> 5;
476 ltcg->tags_alloc(ltcg, n, &mem->tag); 476 ltc->tags_alloc(ltc, n, &mem->tag);
477 } 477 }
478 478
479 if (unlikely(!mem->tag)) 479 if (unlikely(!mem->tag))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index 45e0202f3151..b1e3ed7c8beb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -106,39 +106,59 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
106} 106}
107 107
108static void 108static void
109nouveau_gpio_intr_disable(struct nouveau_event *event, int type, int index) 109nouveau_gpio_intr_fini(struct nvkm_event *event, int type, int index)
110{ 110{
111 struct nouveau_gpio *gpio = nouveau_gpio(event->priv); 111 struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event);
112 const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; 112 const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
113 impl->intr_mask(gpio, type, 1 << index, 0); 113 impl->intr_mask(gpio, type, 1 << index, 0);
114} 114}
115 115
116static void 116static void
117nouveau_gpio_intr_enable(struct nouveau_event *event, int type, int index) 117nouveau_gpio_intr_init(struct nvkm_event *event, int type, int index)
118{ 118{
119 struct nouveau_gpio *gpio = nouveau_gpio(event->priv); 119 struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event);
120 const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; 120 const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
121 impl->intr_mask(gpio, type, 1 << index, 1 << index); 121 impl->intr_mask(gpio, type, 1 << index, 1 << index);
122} 122}
123 123
124static int
125nouveau_gpio_intr_ctor(void *data, u32 size, struct nvkm_notify *notify)
126{
127 struct nvkm_gpio_ntfy_req *req = data;
128 if (!WARN_ON(size != sizeof(*req))) {
129 notify->size = sizeof(struct nvkm_gpio_ntfy_rep);
130 notify->types = req->mask;
131 notify->index = req->line;
132 return 0;
133 }
134 return -EINVAL;
135}
136
124static void 137static void
125nouveau_gpio_intr(struct nouveau_subdev *subdev) 138nouveau_gpio_intr(struct nouveau_subdev *subdev)
126{ 139{
127 struct nouveau_gpio *gpio = nouveau_gpio(subdev); 140 struct nouveau_gpio *gpio = nouveau_gpio(subdev);
128 const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; 141 const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
129 u32 hi, lo, e, i; 142 u32 hi, lo, i;
130 143
131 impl->intr_stat(gpio, &hi, &lo); 144 impl->intr_stat(gpio, &hi, &lo);
132 145
133 for (i = 0; e = 0, (hi | lo) && i < impl->lines; i++) { 146 for (i = 0; (hi | lo) && i < impl->lines; i++) {
134 if (hi & (1 << i)) 147 struct nvkm_gpio_ntfy_rep rep = {
135 e |= NVKM_GPIO_HI; 148 .mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) |
136 if (lo & (1 << i)) 149 (NVKM_GPIO_LO * !!(lo & (1 << i))),
137 e |= NVKM_GPIO_LO; 150 };
138 nouveau_event_trigger(gpio->events, e, i); 151 nvkm_event_send(&gpio->event, rep.mask, i, &rep, sizeof(rep));
139 } 152 }
140} 153}
141 154
155static const struct nvkm_event_func
156nouveau_gpio_intr_func = {
157 .ctor = nouveau_gpio_intr_ctor,
158 .init = nouveau_gpio_intr_init,
159 .fini = nouveau_gpio_intr_fini,
160};
161
142int 162int
143_nouveau_gpio_fini(struct nouveau_object *object, bool suspend) 163_nouveau_gpio_fini(struct nouveau_object *object, bool suspend)
144{ 164{
@@ -183,7 +203,7 @@ void
183_nouveau_gpio_dtor(struct nouveau_object *object) 203_nouveau_gpio_dtor(struct nouveau_object *object)
184{ 204{
185 struct nouveau_gpio *gpio = (void *)object; 205 struct nouveau_gpio *gpio = (void *)object;
186 nouveau_event_destroy(&gpio->events); 206 nvkm_event_fini(&gpio->event);
187 nouveau_subdev_destroy(&gpio->base); 207 nouveau_subdev_destroy(&gpio->base);
188} 208}
189 209
@@ -208,13 +228,11 @@ nouveau_gpio_create_(struct nouveau_object *parent,
208 gpio->get = nouveau_gpio_get; 228 gpio->get = nouveau_gpio_get;
209 gpio->reset = impl->reset; 229 gpio->reset = impl->reset;
210 230
211 ret = nouveau_event_create(2, impl->lines, &gpio->events); 231 ret = nvkm_event_init(&nouveau_gpio_intr_func, 2, impl->lines,
232 &gpio->event);
212 if (ret) 233 if (ret)
213 return ret; 234 return ret;
214 235
215 gpio->events->priv = gpio;
216 gpio->events->enable = nouveau_gpio_intr_enable;
217 gpio->events->disable = nouveau_gpio_intr_disable;
218 nv_subdev(gpio)->intr = nouveau_gpio_intr; 236 nv_subdev(gpio)->intr = nouveau_gpio_intr;
219 return 0; 237 return 0;
220} 238}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 09ba2cc851cf..a652cafde3d6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -326,9 +326,9 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
326} 326}
327 327
328static void 328static void
329nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index) 329nouveau_i2c_intr_fini(struct nvkm_event *event, int type, int index)
330{ 330{
331 struct nouveau_i2c *i2c = nouveau_i2c(event->priv); 331 struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event);
332 struct nouveau_i2c_port *port = i2c->find(i2c, index); 332 struct nouveau_i2c_port *port = i2c->find(i2c, index);
333 const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass; 333 const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
334 if (port && port->aux >= 0) 334 if (port && port->aux >= 0)
@@ -336,15 +336,28 @@ nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index)
336} 336}
337 337
338static void 338static void
339nouveau_i2c_intr_enable(struct nouveau_event *event, int type, int index) 339nouveau_i2c_intr_init(struct nvkm_event *event, int type, int index)
340{ 340{
341 struct nouveau_i2c *i2c = nouveau_i2c(event->priv); 341 struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event);
342 struct nouveau_i2c_port *port = i2c->find(i2c, index); 342 struct nouveau_i2c_port *port = i2c->find(i2c, index);
343 const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass; 343 const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
344 if (port && port->aux >= 0) 344 if (port && port->aux >= 0)
345 impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux); 345 impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
346} 346}
347 347
348static int
349nouveau_i2c_intr_ctor(void *data, u32 size, struct nvkm_notify *notify)
350{
351 struct nvkm_i2c_ntfy_req *req = data;
352 if (!WARN_ON(size != sizeof(*req))) {
353 notify->size = sizeof(struct nvkm_i2c_ntfy_rep);
354 notify->types = req->mask;
355 notify->index = req->port;
356 return 0;
357 }
358 return -EINVAL;
359}
360
348static void 361static void
349nouveau_i2c_intr(struct nouveau_subdev *subdev) 362nouveau_i2c_intr(struct nouveau_subdev *subdev)
350{ 363{
@@ -364,13 +377,26 @@ nouveau_i2c_intr(struct nouveau_subdev *subdev)
364 if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG; 377 if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG;
365 if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ; 378 if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ;
366 if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE; 379 if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE;
367 380 if (e) {
368 nouveau_event_trigger(i2c->ntfy, e, port->index); 381 struct nvkm_i2c_ntfy_rep rep = {
382 .mask = e,
383 };
384 nvkm_event_send(&i2c->event, rep.mask,
385 port->index, &rep,
386 sizeof(rep));
387 }
369 } 388 }
370 } 389 }
371 } 390 }
372} 391}
373 392
393static const struct nvkm_event_func
394nouveau_i2c_intr_func = {
395 .ctor = nouveau_i2c_intr_ctor,
396 .init = nouveau_i2c_intr_init,
397 .fini = nouveau_i2c_intr_fini,
398};
399
374int 400int
375_nouveau_i2c_fini(struct nouveau_object *object, bool suspend) 401_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
376{ 402{
@@ -431,7 +457,7 @@ _nouveau_i2c_dtor(struct nouveau_object *object)
431 struct nouveau_i2c *i2c = (void *)object; 457 struct nouveau_i2c *i2c = (void *)object;
432 struct nouveau_i2c_port *port, *temp; 458 struct nouveau_i2c_port *port, *temp;
433 459
434 nouveau_event_destroy(&i2c->ntfy); 460 nvkm_event_fini(&i2c->event);
435 461
436 list_for_each_entry_safe(port, temp, &i2c->ports, head) { 462 list_for_each_entry_safe(port, temp, &i2c->ports, head) {
437 nouveau_object_ref(NULL, (struct nouveau_object **)&port); 463 nouveau_object_ref(NULL, (struct nouveau_object **)&port);
@@ -547,13 +573,10 @@ nouveau_i2c_create_(struct nouveau_object *parent,
547 } 573 }
548 } 574 }
549 575
550 ret = nouveau_event_create(4, index, &i2c->ntfy); 576 ret = nvkm_event_init(&nouveau_i2c_intr_func, 4, index, &i2c->event);
551 if (ret) 577 if (ret)
552 return ret; 578 return ret;
553 579
554 i2c->ntfy->priv = i2c;
555 i2c->ntfy->enable = nouveau_i2c_intr_enable;
556 i2c->ntfy->disable = nouveau_i2c_intr_disable;
557 return 0; 580 return 0;
558} 581}
559 582
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c
new file mode 100644
index 000000000000..32ed442c5913
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "priv.h"
26
27static int
28nvkm_ltc_tags_alloc(struct nouveau_ltc *ltc, u32 n,
29 struct nouveau_mm_node **pnode)
30{
31 struct nvkm_ltc_priv *priv = (void *)ltc;
32 int ret;
33
34 ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
35 if (ret)
36 *pnode = NULL;
37
38 return ret;
39}
40
41static void
42nvkm_ltc_tags_free(struct nouveau_ltc *ltc, struct nouveau_mm_node **pnode)
43{
44 struct nvkm_ltc_priv *priv = (void *)ltc;
45 nouveau_mm_free(&priv->tags, pnode);
46}
47
48static void
49nvkm_ltc_tags_clear(struct nouveau_ltc *ltc, u32 first, u32 count)
50{
51 const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
52 struct nvkm_ltc_priv *priv = (void *)ltc;
53 const u32 limit = first + count - 1;
54
55 BUG_ON((first > limit) || (limit >= priv->num_tags));
56
57 impl->cbc_clear(priv, first, limit);
58 impl->cbc_wait(priv);
59}
60
61static int
62nvkm_ltc_zbc_color_get(struct nouveau_ltc *ltc, int index, const u32 color[4])
63{
64 const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
65 struct nvkm_ltc_priv *priv = (void *)ltc;
66 memcpy(priv->zbc_color[index], color, sizeof(priv->zbc_color[index]));
67 impl->zbc_clear_color(priv, index, color);
68 return index;
69}
70
71static int
72nvkm_ltc_zbc_depth_get(struct nouveau_ltc *ltc, int index, const u32 depth)
73{
74 const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
75 struct nvkm_ltc_priv *priv = (void *)ltc;
76 priv->zbc_depth[index] = depth;
77 impl->zbc_clear_depth(priv, index, depth);
78 return index;
79}
80
81int
82_nvkm_ltc_init(struct nouveau_object *object)
83{
84 const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object);
85 struct nvkm_ltc_priv *priv = (void *)object;
86 int ret, i;
87
88 ret = nouveau_subdev_init(&priv->base.base);
89 if (ret)
90 return ret;
91
92 for (i = priv->base.zbc_min; i <= priv->base.zbc_max; i++) {
93 impl->zbc_clear_color(priv, i, priv->zbc_color[i]);
94 impl->zbc_clear_depth(priv, i, priv->zbc_depth[i]);
95 }
96
97 return 0;
98}
99
100int
101nvkm_ltc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
102 struct nouveau_oclass *oclass, int length, void **pobject)
103{
104 const struct nvkm_ltc_impl *impl = (void *)oclass;
105 struct nvkm_ltc_priv *priv;
106 int ret;
107
108 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PLTCG",
109 "l2c", length, pobject);
110 priv = *pobject;
111 if (ret)
112 return ret;
113
114 memset(priv->zbc_color, 0x00, sizeof(priv->zbc_color));
115 memset(priv->zbc_depth, 0x00, sizeof(priv->zbc_depth));
116
117 priv->base.base.intr = impl->intr;
118 priv->base.tags_alloc = nvkm_ltc_tags_alloc;
119 priv->base.tags_free = nvkm_ltc_tags_free;
120 priv->base.tags_clear = nvkm_ltc_tags_clear;
121 priv->base.zbc_min = 1; /* reserve 0 for disabled */
122 priv->base.zbc_max = min(impl->zbc, NOUVEAU_LTC_MAX_ZBC_CNT) - 1;
123 priv->base.zbc_color_get = nvkm_ltc_zbc_color_get;
124 priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get;
125 return 0;
126}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
index f2f3338a967a..9e00a1ede120 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
@@ -25,10 +25,45 @@
25#include <subdev/fb.h> 25#include <subdev/fb.h>
26#include <subdev/timer.h> 26#include <subdev/timer.h>
27 27
28#include "gf100.h" 28#include "priv.h"
29
30void
31gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
32{
33 nv_wr32(priv, 0x17e8cc, start);
34 nv_wr32(priv, 0x17e8d0, limit);
35 nv_wr32(priv, 0x17e8c8, 0x00000004);
36}
37
38void
39gf100_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
40{
41 int c, s;
42 for (c = 0; c < priv->ltc_nr; c++) {
43 for (s = 0; s < priv->lts_nr; s++)
44 nv_wait(priv, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0);
45 }
46}
47
48void
49gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
50{
51 nv_mask(priv, 0x17ea44, 0x0000000f, i);
52 nv_wr32(priv, 0x17ea48, color[0]);
53 nv_wr32(priv, 0x17ea4c, color[1]);
54 nv_wr32(priv, 0x17ea50, color[2]);
55 nv_wr32(priv, 0x17ea54, color[3]);
56}
57
58void
59gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
60{
61 nv_mask(priv, 0x17ea44, 0x0000000f, i);
62 nv_wr32(priv, 0x17ea58, depth);
63}
29 64
30static void 65static void
31gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts) 66gf100_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
32{ 67{
33 u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400); 68 u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
34 u32 stat = nv_rd32(priv, base + 0x020); 69 u32 stat = nv_rd32(priv, base + 0x020);
@@ -39,17 +74,17 @@ gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
39 } 74 }
40} 75}
41 76
42static void 77void
43gf100_ltcg_intr(struct nouveau_subdev *subdev) 78gf100_ltc_intr(struct nouveau_subdev *subdev)
44{ 79{
45 struct gf100_ltcg_priv *priv = (void *)subdev; 80 struct nvkm_ltc_priv *priv = (void *)subdev;
46 u32 mask; 81 u32 mask;
47 82
48 mask = nv_rd32(priv, 0x00017c); 83 mask = nv_rd32(priv, 0x00017c);
49 while (mask) { 84 while (mask) {
50 u32 lts, ltc = __ffs(mask); 85 u32 lts, ltc = __ffs(mask);
51 for (lts = 0; lts < priv->lts_nr; lts++) 86 for (lts = 0; lts < priv->lts_nr; lts++)
52 gf100_ltcg_lts_isr(priv, ltc, lts); 87 gf100_ltc_lts_isr(priv, ltc, lts);
53 mask &= ~(1 << ltc); 88 mask &= ~(1 << ltc);
54 } 89 }
55 90
@@ -59,52 +94,38 @@ gf100_ltcg_intr(struct nouveau_subdev *subdev)
59 nv_mask(priv, 0x000640, 0x02000000, 0x00000000); 94 nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
60} 95}
61 96
62int 97static int
63gf100_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n, 98gf100_ltc_init(struct nouveau_object *object)
64 struct nouveau_mm_node **pnode)
65{ 99{
66 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; 100 struct nvkm_ltc_priv *priv = (void *)object;
67 int ret; 101 int ret;
68 102
69 ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode); 103 ret = nvkm_ltc_init(priv);
70 if (ret) 104 if (ret)
71 *pnode = NULL; 105 return ret;
72 106
73 return ret; 107 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
108 nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
109 nv_wr32(priv, 0x17e8d4, priv->tag_base);
110 return 0;
74} 111}
75 112
76void 113void
77gf100_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode) 114gf100_ltc_dtor(struct nouveau_object *object)
78{
79 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
80
81 nouveau_mm_free(&priv->tags, pnode);
82}
83
84static void
85gf100_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
86{ 115{
87 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; 116 struct nouveau_fb *pfb = nouveau_fb(object);
88 u32 last = first + count - 1; 117 struct nvkm_ltc_priv *priv = (void *)object;
89 int p, i;
90
91 BUG_ON((first > last) || (last >= priv->num_tags));
92 118
93 nv_wr32(priv, 0x17e8cc, first); 119 nouveau_mm_fini(&priv->tags);
94 nv_wr32(priv, 0x17e8d0, last); 120 nouveau_mm_free(&pfb->vram, &priv->tag_ram);
95 nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */
96 121
97 /* wait until it's finished with clearing */ 122 nvkm_ltc_destroy(priv);
98 for (p = 0; p < priv->ltc_nr; ++p) {
99 for (i = 0; i < priv->lts_nr; ++i)
100 nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
101 }
102} 123}
103 124
104/* TODO: Figure out tag memory details and drop the over-cautious allocation. 125/* TODO: Figure out tag memory details and drop the over-cautious allocation.
105 */ 126 */
106int 127int
107gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv) 128gf100_ltc_init_tag_ram(struct nouveau_fb *pfb, struct nvkm_ltc_priv *priv)
108{ 129{
109 u32 tag_size, tag_margin, tag_align; 130 u32 tag_size, tag_margin, tag_align;
110 int ret; 131 int ret;
@@ -142,22 +163,22 @@ gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv)
142 163
143 priv->tag_base = tag_base; 164 priv->tag_base = tag_base;
144 } 165 }
145 ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
146 166
167 ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
147 return ret; 168 return ret;
148} 169}
149 170
150static int 171int
151gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 172gf100_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
152 struct nouveau_oclass *oclass, void *data, u32 size, 173 struct nouveau_oclass *oclass, void *data, u32 size,
153 struct nouveau_object **pobject) 174 struct nouveau_object **pobject)
154{ 175{
155 struct gf100_ltcg_priv *priv;
156 struct nouveau_fb *pfb = nouveau_fb(parent); 176 struct nouveau_fb *pfb = nouveau_fb(parent);
177 struct nvkm_ltc_priv *priv;
157 u32 parts, mask; 178 u32 parts, mask;
158 int ret, i; 179 int ret, i;
159 180
160 ret = nouveau_ltcg_create(parent, engine, oclass, &priv); 181 ret = nvkm_ltc_create(parent, engine, oclass, &priv);
161 *pobject = nv_object(priv); 182 *pobject = nv_object(priv);
162 if (ret) 183 if (ret)
163 return ret; 184 return ret;
@@ -170,57 +191,27 @@ gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
170 } 191 }
171 priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28; 192 priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28;
172 193
173 ret = gf100_ltcg_init_tag_ram(pfb, priv); 194 ret = gf100_ltc_init_tag_ram(pfb, priv);
174 if (ret) 195 if (ret)
175 return ret; 196 return ret;
176 197
177 priv->base.tags_alloc = gf100_ltcg_tags_alloc; 198 nv_subdev(priv)->intr = gf100_ltc_intr;
178 priv->base.tags_free = gf100_ltcg_tags_free;
179 priv->base.tags_clear = gf100_ltcg_tags_clear;
180
181 nv_subdev(priv)->intr = gf100_ltcg_intr;
182 return 0;
183}
184
185void
186gf100_ltcg_dtor(struct nouveau_object *object)
187{
188 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
189 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
190 struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);
191
192 nouveau_mm_fini(&priv->tags);
193 nouveau_mm_free(&pfb->vram, &priv->tag_ram);
194
195 nouveau_ltcg_destroy(ltcg);
196}
197
198static int
199gf100_ltcg_init(struct nouveau_object *object)
200{
201 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
202 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
203 int ret;
204
205 ret = nouveau_ltcg_init(ltcg);
206 if (ret)
207 return ret;
208
209 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
210 nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
211 if (nv_device(ltcg)->card_type >= NV_E0)
212 nv_wr32(priv, 0x17e000, priv->ltc_nr);
213 nv_wr32(priv, 0x17e8d4, priv->tag_base);
214 return 0; 199 return 0;
215} 200}
216 201
217struct nouveau_oclass * 202struct nouveau_oclass *
218gf100_ltcg_oclass = &(struct nouveau_oclass) { 203gf100_ltc_oclass = &(struct nvkm_ltc_impl) {
219 .handle = NV_SUBDEV(LTCG, 0xc0), 204 .base.handle = NV_SUBDEV(LTC, 0xc0),
220 .ofuncs = &(struct nouveau_ofuncs) { 205 .base.ofuncs = &(struct nouveau_ofuncs) {
221 .ctor = gf100_ltcg_ctor, 206 .ctor = gf100_ltc_ctor,
222 .dtor = gf100_ltcg_dtor, 207 .dtor = gf100_ltc_dtor,
223 .init = gf100_ltcg_init, 208 .init = gf100_ltc_init,
224 .fini = _nouveau_ltcg_fini, 209 .fini = _nvkm_ltc_fini,
225 }, 210 },
226}; 211 .intr = gf100_ltc_intr,
212 .cbc_clear = gf100_ltc_cbc_clear,
213 .cbc_wait = gf100_ltc_cbc_wait,
214 .zbc = 16,
215 .zbc_clear_color = gf100_ltc_zbc_clear_color,
216 .zbc_clear_depth = gf100_ltc_zbc_clear_depth,
217}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
new file mode 100644
index 000000000000..ea716569745d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "priv.h"
26
27static int
28gk104_ltc_init(struct nouveau_object *object)
29{
30 struct nvkm_ltc_priv *priv = (void *)object;
31 int ret;
32
33 ret = nvkm_ltc_init(priv);
34 if (ret)
35 return ret;
36
37 nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
38 nv_wr32(priv, 0x17e000, priv->ltc_nr);
39 nv_wr32(priv, 0x17e8d4, priv->tag_base);
40 return 0;
41}
42
43struct nouveau_oclass *
44gk104_ltc_oclass = &(struct nvkm_ltc_impl) {
45 .base.handle = NV_SUBDEV(LTC, 0xe4),
46 .base.ofuncs = &(struct nouveau_ofuncs) {
47 .ctor = gf100_ltc_ctor,
48 .dtor = gf100_ltc_dtor,
49 .init = gk104_ltc_init,
50 .fini = _nvkm_ltc_fini,
51 },
52 .intr = gf100_ltc_intr,
53 .cbc_clear = gf100_ltc_cbc_clear,
54 .cbc_wait = gf100_ltc_cbc_wait,
55 .zbc = 16,
56 .zbc_clear_color = gf100_ltc_zbc_clear_color,
57 .zbc_clear_depth = gf100_ltc_zbc_clear_depth,
58}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
index e79d0e81de40..4761b2e9af00 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
@@ -25,10 +25,45 @@
25#include <subdev/fb.h> 25#include <subdev/fb.h>
26#include <subdev/timer.h> 26#include <subdev/timer.h>
27 27
28#include "gf100.h" 28#include "priv.h"
29 29
30static void 30static void
31gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts) 31gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
32{
33 nv_wr32(priv, 0x17e270, start);
34 nv_wr32(priv, 0x17e274, limit);
35 nv_wr32(priv, 0x17e26c, 0x00000004);
36}
37
38static void
39gm107_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
40{
41 int c, s;
42 for (c = 0; c < priv->ltc_nr; c++) {
43 for (s = 0; s < priv->lts_nr; s++)
44 nv_wait(priv, 0x14046c + c * 0x2000 + s * 0x200, ~0, 0);
45 }
46}
47
48static void
49gm107_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
50{
51 nv_mask(priv, 0x17e338, 0x0000000f, i);
52 nv_wr32(priv, 0x17e33c, color[0]);
53 nv_wr32(priv, 0x17e340, color[1]);
54 nv_wr32(priv, 0x17e344, color[2]);
55 nv_wr32(priv, 0x17e348, color[3]);
56}
57
58static void
59gm107_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
60{
61 nv_mask(priv, 0x17e338, 0x0000000f, i);
62 nv_wr32(priv, 0x17e34c, depth);
63}
64
65static void
66gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
32{ 67{
33 u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400); 68 u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400);
34 u32 stat = nv_rd32(priv, base + 0x00c); 69 u32 stat = nv_rd32(priv, base + 0x00c);
@@ -40,16 +75,16 @@ gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
40} 75}
41 76
42static void 77static void
43gm107_ltcg_intr(struct nouveau_subdev *subdev) 78gm107_ltc_intr(struct nouveau_subdev *subdev)
44{ 79{
45 struct gf100_ltcg_priv *priv = (void *)subdev; 80 struct nvkm_ltc_priv *priv = (void *)subdev;
46 u32 mask; 81 u32 mask;
47 82
48 mask = nv_rd32(priv, 0x00017c); 83 mask = nv_rd32(priv, 0x00017c);
49 while (mask) { 84 while (mask) {
50 u32 lts, ltc = __ffs(mask); 85 u32 lts, ltc = __ffs(mask);
51 for (lts = 0; lts < priv->lts_nr; lts++) 86 for (lts = 0; lts < priv->lts_nr; lts++)
52 gm107_ltcg_lts_isr(priv, ltc, lts); 87 gm107_ltc_lts_isr(priv, ltc, lts);
53 mask &= ~(1 << ltc); 88 mask &= ~(1 << ltc);
54 } 89 }
55 90
@@ -59,37 +94,32 @@ gm107_ltcg_intr(struct nouveau_subdev *subdev)
59 nv_mask(priv, 0x000640, 0x02000000, 0x00000000); 94 nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
60} 95}
61 96
62static void 97static int
63gm107_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count) 98gm107_ltc_init(struct nouveau_object *object)
64{ 99{
65 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; 100 struct nvkm_ltc_priv *priv = (void *)object;
66 u32 last = first + count - 1; 101 int ret;
67 int p, i;
68
69 BUG_ON((first > last) || (last >= priv->num_tags));
70 102
71 nv_wr32(priv, 0x17e270, first); 103 ret = nvkm_ltc_init(priv);
72 nv_wr32(priv, 0x17e274, last); 104 if (ret)
73 nv_wr32(priv, 0x17e26c, 0x4); /* trigger clear */ 105 return ret;
74 106
75 /* wait until it's finished with clearing */ 107 nv_wr32(priv, 0x17e27c, priv->ltc_nr);
76 for (p = 0; p < priv->ltc_nr; ++p) { 108 nv_wr32(priv, 0x17e278, priv->tag_base);
77 for (i = 0; i < priv->lts_nr; ++i) 109 return 0;
78 nv_wait(priv, 0x14046c + p * 0x2000 + i * 0x200, ~0, 0);
79 }
80} 110}
81 111
82static int 112static int
83gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 113gm107_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
84 struct nouveau_oclass *oclass, void *data, u32 size, 114 struct nouveau_oclass *oclass, void *data, u32 size,
85 struct nouveau_object **pobject) 115 struct nouveau_object **pobject)
86{ 116{
87 struct gf100_ltcg_priv *priv;
88 struct nouveau_fb *pfb = nouveau_fb(parent); 117 struct nouveau_fb *pfb = nouveau_fb(parent);
118 struct nvkm_ltc_priv *priv;
89 u32 parts, mask; 119 u32 parts, mask;
90 int ret, i; 120 int ret, i;
91 121
92 ret = nouveau_ltcg_create(parent, engine, oclass, &priv); 122 ret = nvkm_ltc_create(parent, engine, oclass, &priv);
93 *pobject = nv_object(priv); 123 *pobject = nv_object(priv);
94 if (ret) 124 if (ret)
95 return ret; 125 return ret;
@@ -102,41 +132,26 @@ gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
102 } 132 }
103 priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28; 133 priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28;
104 134
105 ret = gf100_ltcg_init_tag_ram(pfb, priv); 135 ret = gf100_ltc_init_tag_ram(pfb, priv);
106 if (ret)
107 return ret;
108
109 priv->base.tags_alloc = gf100_ltcg_tags_alloc;
110 priv->base.tags_free = gf100_ltcg_tags_free;
111 priv->base.tags_clear = gm107_ltcg_tags_clear;
112
113 nv_subdev(priv)->intr = gm107_ltcg_intr;
114 return 0;
115}
116
117static int
118gm107_ltcg_init(struct nouveau_object *object)
119{
120 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
121 struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
122 int ret;
123
124 ret = nouveau_ltcg_init(ltcg);
125 if (ret) 136 if (ret)
126 return ret; 137 return ret;
127 138
128 nv_wr32(priv, 0x17e27c, priv->ltc_nr);
129 nv_wr32(priv, 0x17e278, priv->tag_base);
130 return 0; 139 return 0;
131} 140}
132 141
133struct nouveau_oclass * 142struct nouveau_oclass *
134gm107_ltcg_oclass = &(struct nouveau_oclass) { 143gm107_ltc_oclass = &(struct nvkm_ltc_impl) {
135 .handle = NV_SUBDEV(LTCG, 0xff), 144 .base.handle = NV_SUBDEV(LTC, 0xff),
136 .ofuncs = &(struct nouveau_ofuncs) { 145 .base.ofuncs = &(struct nouveau_ofuncs) {
137 .ctor = gm107_ltcg_ctor, 146 .ctor = gm107_ltc_ctor,
138 .dtor = gf100_ltcg_dtor, 147 .dtor = gf100_ltc_dtor,
139 .init = gm107_ltcg_init, 148 .init = gm107_ltc_init,
140 .fini = _nouveau_ltcg_fini, 149 .fini = _nvkm_ltc_fini,
141 }, 150 },
142}; 151 .intr = gm107_ltc_intr,
152 .cbc_clear = gm107_ltc_cbc_clear,
153 .cbc_wait = gm107_ltc_cbc_wait,
154 .zbc = 16,
155 .zbc_clear_color = gm107_ltc_zbc_clear_color,
156 .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
157}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h
new file mode 100644
index 000000000000..594924f39126
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h
@@ -0,0 +1,69 @@
1#ifndef __NVKM_LTC_PRIV_H__
2#define __NVKM_LTC_PRIV_H__
3
4#include <subdev/ltc.h>
5#include <subdev/fb.h>
6
7struct nvkm_ltc_priv {
8 struct nouveau_ltc base;
9 u32 ltc_nr;
10 u32 lts_nr;
11
12 u32 num_tags;
13 u32 tag_base;
14 struct nouveau_mm tags;
15 struct nouveau_mm_node *tag_ram;
16
17 u32 zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT][4];
18 u32 zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT];
19};
20
21#define nvkm_ltc_create(p,e,o,d) \
22 nvkm_ltc_create_((p), (e), (o), sizeof(**d), (void **)d)
23#define nvkm_ltc_destroy(p) ({ \
24 struct nvkm_ltc_priv *_priv = (p); \
25 _nvkm_ltc_dtor(nv_object(_priv)); \
26})
27#define nvkm_ltc_init(p) ({ \
28 struct nvkm_ltc_priv *_priv = (p); \
29 _nvkm_ltc_init(nv_object(_priv)); \
30})
31#define nvkm_ltc_fini(p,s) ({ \
32 struct nvkm_ltc_priv *_priv = (p); \
33 _nvkm_ltc_fini(nv_object(_priv), (s)); \
34})
35
36int nvkm_ltc_create_(struct nouveau_object *, struct nouveau_object *,
37 struct nouveau_oclass *, int, void **);
38
39#define _nvkm_ltc_dtor _nouveau_subdev_dtor
40int _nvkm_ltc_init(struct nouveau_object *);
41#define _nvkm_ltc_fini _nouveau_subdev_fini
42
43int gf100_ltc_ctor(struct nouveau_object *, struct nouveau_object *,
44 struct nouveau_oclass *, void *, u32,
45 struct nouveau_object **);
46void gf100_ltc_dtor(struct nouveau_object *);
47int gf100_ltc_init_tag_ram(struct nouveau_fb *, struct nvkm_ltc_priv *);
48int gf100_ltc_tags_alloc(struct nouveau_ltc *, u32, struct nouveau_mm_node **);
49void gf100_ltc_tags_free(struct nouveau_ltc *, struct nouveau_mm_node **);
50
51struct nvkm_ltc_impl {
52 struct nouveau_oclass base;
53 void (*intr)(struct nouveau_subdev *);
54
55 void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit);
56 void (*cbc_wait)(struct nvkm_ltc_priv *);
57
58 int zbc;
59 void (*zbc_clear_color)(struct nvkm_ltc_priv *, int, const u32[4]);
60 void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32);
61};
62
63void gf100_ltc_intr(struct nouveau_subdev *);
64void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32);
65void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *);
66void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]);
67void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32);
68
69#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h
deleted file mode 100644
index 87b10b8412ea..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h
+++ /dev/null
@@ -1,21 +0,0 @@
1#ifndef __NVKM_LTCG_PRIV_GF100_H__
2#define __NVKM_LTCG_PRIV_GF100_H__
3
4#include <subdev/ltcg.h>
5
6struct gf100_ltcg_priv {
7 struct nouveau_ltcg base;
8 u32 ltc_nr;
9 u32 lts_nr;
10 u32 num_tags;
11 u32 tag_base;
12 struct nouveau_mm tags;
13 struct nouveau_mm_node *tag_ram;
14};
15
16void gf100_ltcg_dtor(struct nouveau_object *);
17int gf100_ltcg_init_tag_ram(struct nouveau_fb *, struct gf100_ltcg_priv *);
18int gf100_ltcg_tags_alloc(struct nouveau_ltcg *, u32, struct nouveau_mm_node **);
19void gf100_ltcg_tags_free(struct nouveau_ltcg *, struct nouveau_mm_node **);
20
21#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 8a5555192fa5..ca7cee3a314a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -22,9 +22,17 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/mc.h> 25#include "priv.h"
26#include <core/option.h> 26#include <core/option.h>
27 27
28static inline void
29nouveau_mc_unk260(struct nouveau_mc *pmc, u32 data)
30{
31 const struct nouveau_mc_oclass *impl = (void *)nv_oclass(pmc);
32 if (impl->unk260)
33 impl->unk260(pmc, data);
34}
35
28static inline u32 36static inline u32
29nouveau_mc_intr_mask(struct nouveau_mc *pmc) 37nouveau_mc_intr_mask(struct nouveau_mc *pmc)
30{ 38{
@@ -114,6 +122,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
114 if (ret) 122 if (ret)
115 return ret; 123 return ret;
116 124
125 pmc->unk260 = nouveau_mc_unk260;
126
117 if (nv_device_is_pci(device)) 127 if (nv_device_is_pci(device))
118 switch (device->pdev->device & 0x0ff0) { 128 switch (device->pdev->device & 0x0ff0) {
119 case 0x00f0: 129 case 0x00f0:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c
new file mode 100644
index 000000000000..b8d6cb435d0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv04.h"
26
27struct nouveau_oclass *
28gk20a_mc_oclass = &(struct nouveau_mc_oclass) {
29 .base.handle = NV_SUBDEV(MC, 0xea),
30 .base.ofuncs = &(struct nouveau_ofuncs) {
31 .ctor = nv04_mc_ctor,
32 .dtor = _nouveau_mc_dtor,
33 .init = nv50_mc_init,
34 .fini = _nouveau_mc_fini,
35 },
36 .intr = nvc0_mc_intr,
37 .msi_rearm = nv40_mc_msi_rearm,
38}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
index 81a408e7d034..4d9ea46c47c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -1,7 +1,7 @@
1#ifndef __NVKM_MC_NV04_H__ 1#ifndef __NVKM_MC_NV04_H__
2#define __NVKM_MC_NV04_H__ 2#define __NVKM_MC_NV04_H__
3 3
4#include <subdev/mc.h> 4#include "priv.h"
5 5
6struct nv04_mc_priv { 6struct nv04_mc_priv {
7 struct nouveau_mc base; 7 struct nouveau_mc base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index f9c6a678b47d..15d41dc176ff 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -41,7 +41,7 @@ nvc0_mc_intr[] = {
41 { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */ 41 { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
42 { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */ 42 { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
43 { 0x01000000, NVDEV_SUBDEV_PWR }, 43 { 0x01000000, NVDEV_SUBDEV_PWR },
44 { 0x02000000, NVDEV_SUBDEV_LTCG }, 44 { 0x02000000, NVDEV_SUBDEV_LTC },
45 { 0x08000000, NVDEV_SUBDEV_FB }, 45 { 0x08000000, NVDEV_SUBDEV_FB },
46 { 0x10000000, NVDEV_SUBDEV_BUS }, 46 { 0x10000000, NVDEV_SUBDEV_BUS },
47 { 0x40000000, NVDEV_SUBDEV_IBUS }, 47 { 0x40000000, NVDEV_SUBDEV_IBUS },
@@ -56,6 +56,12 @@ nvc0_mc_msi_rearm(struct nouveau_mc *pmc)
56 nv_wr32(priv, 0x088704, 0x00000000); 56 nv_wr32(priv, 0x088704, 0x00000000);
57} 57}
58 58
59void
60nvc0_mc_unk260(struct nouveau_mc *pmc, u32 data)
61{
62 nv_wr32(pmc, 0x000260, data);
63}
64
59struct nouveau_oclass * 65struct nouveau_oclass *
60nvc0_mc_oclass = &(struct nouveau_mc_oclass) { 66nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
61 .base.handle = NV_SUBDEV(MC, 0xc0), 67 .base.handle = NV_SUBDEV(MC, 0xc0),
@@ -67,4 +73,5 @@ nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
67 }, 73 },
68 .intr = nvc0_mc_intr, 74 .intr = nvc0_mc_intr,
69 .msi_rearm = nvc0_mc_msi_rearm, 75 .msi_rearm = nvc0_mc_msi_rearm,
76 .unk260 = nvc0_mc_unk260,
70}.base; 77}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
index 837e545aeb9f..68b5f61aadb5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
@@ -35,4 +35,5 @@ nvc3_mc_oclass = &(struct nouveau_mc_oclass) {
35 }, 35 },
36 .intr = nvc0_mc_intr, 36 .intr = nvc0_mc_intr,
37 .msi_rearm = nv40_mc_msi_rearm, 37 .msi_rearm = nv40_mc_msi_rearm,
38 .unk260 = nvc0_mc_unk260,
38}.base; 39}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h
new file mode 100644
index 000000000000..911e66392587
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h
@@ -0,0 +1,38 @@
1#ifndef __NVKM_MC_PRIV_H__
2#define __NVKM_MC_PRIV_H__
3
4#include <subdev/mc.h>
5
6#define nouveau_mc_create(p,e,o,d) \
7 nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
8#define nouveau_mc_destroy(p) ({ \
9 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
10})
11#define nouveau_mc_init(p) ({ \
12 struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \
13})
14#define nouveau_mc_fini(p,s) ({ \
15 struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \
16})
17
18int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
19 struct nouveau_oclass *, int, void **);
20void _nouveau_mc_dtor(struct nouveau_object *);
21int _nouveau_mc_init(struct nouveau_object *);
22int _nouveau_mc_fini(struct nouveau_object *, bool);
23
24struct nouveau_mc_intr {
25 u32 stat;
26 u32 unit;
27};
28
29struct nouveau_mc_oclass {
30 struct nouveau_oclass base;
31 const struct nouveau_mc_intr *intr;
32 void (*msi_rearm)(struct nouveau_mc *);
33 void (*unk260)(struct nouveau_mc *, u32);
34};
35
36void nvc0_mc_unk260(struct nouveau_mc *, u32);
37
38#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
index d4fd3bc9c66f..69f1f34f6931 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -22,9 +22,18 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/pwr.h>
26#include <subdev/timer.h> 25#include <subdev/timer.h>
27 26
27#include "priv.h"
28
29static void
30nouveau_pwr_pgob(struct nouveau_pwr *ppwr, bool enable)
31{
32 const struct nvkm_pwr_impl *impl = (void *)nv_oclass(ppwr);
33 if (impl->pgob)
34 impl->pgob(ppwr, enable);
35}
36
28static int 37static int
29nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2], 38nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
30 u32 process, u32 message, u32 data0, u32 data1) 39 u32 process, u32 message, u32 data0, u32 data1)
@@ -177,6 +186,7 @@ _nouveau_pwr_fini(struct nouveau_object *object, bool suspend)
177int 186int
178_nouveau_pwr_init(struct nouveau_object *object) 187_nouveau_pwr_init(struct nouveau_object *object)
179{ 188{
189 const struct nvkm_pwr_impl *impl = (void *)object->oclass;
180 struct nouveau_pwr *ppwr = (void *)object; 190 struct nouveau_pwr *ppwr = (void *)object;
181 int ret, i; 191 int ret, i;
182 192
@@ -186,6 +196,7 @@ _nouveau_pwr_init(struct nouveau_object *object)
186 196
187 nv_subdev(ppwr)->intr = nouveau_pwr_intr; 197 nv_subdev(ppwr)->intr = nouveau_pwr_intr;
188 ppwr->message = nouveau_pwr_send; 198 ppwr->message = nouveau_pwr_send;
199 ppwr->pgob = nouveau_pwr_pgob;
189 200
190 /* prevent previous ucode from running, wait for idle, reset */ 201 /* prevent previous ucode from running, wait for idle, reset */
191 nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */ 202 nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
@@ -195,15 +206,15 @@ _nouveau_pwr_init(struct nouveau_object *object)
195 206
196 /* upload data segment */ 207 /* upload data segment */
197 nv_wr32(ppwr, 0x10a1c0, 0x01000000); 208 nv_wr32(ppwr, 0x10a1c0, 0x01000000);
198 for (i = 0; i < ppwr->data.size / 4; i++) 209 for (i = 0; i < impl->data.size / 4; i++)
199 nv_wr32(ppwr, 0x10a1c4, ppwr->data.data[i]); 210 nv_wr32(ppwr, 0x10a1c4, impl->data.data[i]);
200 211
201 /* upload code segment */ 212 /* upload code segment */
202 nv_wr32(ppwr, 0x10a180, 0x01000000); 213 nv_wr32(ppwr, 0x10a180, 0x01000000);
203 for (i = 0; i < ppwr->code.size / 4; i++) { 214 for (i = 0; i < impl->code.size / 4; i++) {
204 if ((i & 0x3f) == 0) 215 if ((i & 0x3f) == 0)
205 nv_wr32(ppwr, 0x10a188, i >> 6); 216 nv_wr32(ppwr, 0x10a188, i >> 6);
206 nv_wr32(ppwr, 0x10a184, ppwr->code.data[i]); 217 nv_wr32(ppwr, 0x10a184, impl->code.data[i]);
207 } 218 }
208 219
209 /* start it running */ 220 /* start it running */
@@ -245,3 +256,15 @@ nouveau_pwr_create_(struct nouveau_object *parent,
245 init_waitqueue_head(&ppwr->recv.wait); 256 init_waitqueue_head(&ppwr->recv.wait);
246 return 0; 257 return 0;
247} 258}
259
260int
261_nouveau_pwr_ctor(struct nouveau_object *parent,
262 struct nouveau_object *engine,
263 struct nouveau_oclass *oclass, void *data, u32 size,
264 struct nouveau_object **pobject)
265{
266 struct nouveau_pwr *ppwr;
267 int ret = nouveau_pwr_create(parent, engine, oclass, &ppwr);
268 *pobject = nv_object(ppwr);
269 return ret;
270}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
index e2a63ac5422b..5668e045bac1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -242,7 +242,7 @@
242*/ push reg /* 242*/ push reg /*
243*/ pop $r13 /* 243*/ pop $r13 /*
244*/ pop $r14 /* 244*/ pop $r14 /*
245*/ call(wr32) /* 245*/ call(wr32)
246#else 246#else
247#define nv_wr32(addr,reg) /* 247#define nv_wr32(addr,reg) /*
248*/ sethi $r0 0x14000000 /* 248*/ sethi $r0 0x14000000 /*
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 39a5dc150a05..986495d533dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -46,8 +46,8 @@ uint32_t nv108_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x0000046f, 49 0x00000464,
50 0x00000461, 50 0x00000456,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nv108_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x00000473, 71 0x00000468,
72 0x00000471, 72 0x00000466,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nv108_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000877, 93 0x0000086c,
94 0x0000071e, 94 0x00000713,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nv108_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x00000898, 115 0x0000088d,
116 0x00000879, 116 0x0000086e,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nv108_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x000008a3, 137 0x00000898,
138 0x000008a1, 138 0x00000896,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -239,10 +239,10 @@ uint32_t nv108_pwr_data[] = {
239 0x000003df, 239 0x000003df,
240 0x00040003, 240 0x00040003,
241 0x00000000, 241 0x00000000,
242 0x00000407, 242 0x000003fc,
243 0x00010004, 243 0x00010004,
244 0x00000000, 244 0x00000000,
245 0x00000421, 245 0x00000416,
246/* 0x03ac: memx_func_tail */ 246/* 0x03ac: memx_func_tail */
247/* 0x03ac: memx_data_head */ 247/* 0x03ac: memx_data_head */
248 0x00000000, 248 0x00000000,
@@ -1080,375 +1080,375 @@ uint32_t nv108_pwr_code[] = {
1080 0x50f960f9, 1080 0x50f960f9,
1081 0xe0fcd0fc, 1081 0xe0fcd0fc,
1082 0x00002e7e, 1082 0x00002e7e,
1083 0x140003f1, 1083 0xf40242b6,
1084 0xa00506fd, 1084 0x00f8e81b,
1085 0xb604bd05, 1085/* 0x03fc: memx_func_wait */
1086 0x1bf40242, 1086 0x88cf2c08,
1087/* 0x0407: memx_func_wait */
1088 0x0800f8dd,
1089 0x0088cf2c,
1090 0x98001e98,
1091 0x1c98011d,
1092 0x031b9802,
1093 0x7e1010b6,
1094 0xf8000071,
1095/* 0x0421: memx_func_delay */
1096 0x001e9800, 1087 0x001e9800,
1097 0x7e0410b6, 1088 0x98011d98,
1098 0xf800005d, 1089 0x1b98021c,
1099/* 0x042d: memx_exec */ 1090 0x1010b603,
1100 0xf9e0f900, 1091 0x0000717e,
1101 0xb2c1b2d0, 1092/* 0x0416: memx_func_delay */
1102/* 0x0435: memx_exec_next */ 1093 0x1e9800f8,
1103 0x001398b2, 1094 0x0410b600,
1104 0x950410b6,
1105 0x30f01034,
1106 0xde35980c,
1107 0x12a655f9,
1108 0xfced1ef4,
1109 0x7ee0fcd0,
1110 0xf800023f,
1111/* 0x0455: memx_info */
1112 0x03ac4c00,
1113 0x7e08004b,
1114 0xf800023f,
1115/* 0x0461: memx_recv */
1116 0x01d6b000,
1117 0xb0c90bf4,
1118 0x0bf400d6,
1119/* 0x046f: memx_init */
1120 0xf800f8eb,
1121/* 0x0471: perf_recv */
1122/* 0x0473: perf_init */
1123 0xf800f800,
1124/* 0x0475: i2c_drive_scl */
1125 0x0036b000,
1126 0x400d0bf4,
1127 0x01f607e0,
1128 0xf804bd00,
1129/* 0x0485: i2c_drive_scl_lo */
1130 0x07e44000,
1131 0xbd0001f6,
1132/* 0x048f: i2c_drive_sda */
1133 0xb000f804,
1134 0x0bf40036,
1135 0x07e0400d,
1136 0xbd0002f6,
1137/* 0x049f: i2c_drive_sda_lo */
1138 0x4000f804,
1139 0x02f607e4,
1140 0xf804bd00,
1141/* 0x04a9: i2c_sense_scl */
1142 0x0132f400,
1143 0xcf07c443,
1144 0x31fd0033,
1145 0x060bf404,
1146/* 0x04bb: i2c_sense_scl_done */
1147 0xf80131f4,
1148/* 0x04bd: i2c_sense_sda */
1149 0x0132f400,
1150 0xcf07c443,
1151 0x32fd0033,
1152 0x060bf404,
1153/* 0x04cf: i2c_sense_sda_done */
1154 0xf80131f4,
1155/* 0x04d1: i2c_raise_scl */
1156 0x4440f900,
1157 0x01030898,
1158 0x0004757e,
1159/* 0x04dc: i2c_raise_scl_wait */
1160 0x7e03e84e,
1161 0x7e00005d,
1162 0xf40004a9,
1163 0x42b60901,
1164 0xef1bf401,
1165/* 0x04f0: i2c_raise_scl_done */
1166 0x00f840fc,
1167/* 0x04f4: i2c_start */
1168 0x0004a97e,
1169 0x7e0d11f4,
1170 0xf40004bd,
1171 0x0ef40611,
1172/* 0x0505: i2c_start_rep */
1173 0x7e00032e,
1174 0x03000475,
1175 0x048f7e01,
1176 0x0076bb00,
1177 0xf90465b6,
1178 0x04659450,
1179 0xbd0256bb,
1180 0x0475fd50,
1181 0xd17e50fc,
1182 0x64b60004,
1183 0x1d11f404,
1184/* 0x0530: i2c_start_send */
1185 0x8f7e0003,
1186 0x884e0004,
1187 0x005d7e13,
1188 0x7e000300,
1189 0x4e000475,
1190 0x5d7e1388,
1191/* 0x054a: i2c_start_out */
1192 0x00f80000,
1193/* 0x054c: i2c_stop */
1194 0x757e0003,
1195 0x00030004,
1196 0x00048f7e,
1197 0x7e03e84e,
1198 0x0300005d,
1199 0x04757e01,
1200 0x13884e00,
1201 0x00005d7e, 1095 0x00005d7e,
1202 0x8f7e0103, 1096/* 0x0422: memx_exec */
1203 0x884e0004, 1097 0xe0f900f8,
1204 0x005d7e13, 1098 0xc1b2d0f9,
1205/* 0x057b: i2c_bitw */ 1099/* 0x042a: memx_exec_next */
1206 0x7e00f800, 1100 0x1398b2b2,
1207 0x4e00048f, 1101 0x0410b600,
1208 0x5d7e03e8, 1102 0xf0103495,
1209 0x76bb0000, 1103 0x35980c30,
1104 0xa655f9de,
1105 0xed1ef412,
1106 0xe0fcd0fc,
1107 0x00023f7e,
1108/* 0x044a: memx_info */
1109 0xac4c00f8,
1110 0x08004b03,
1111 0x00023f7e,
1112/* 0x0456: memx_recv */
1113 0xd6b000f8,
1114 0xc90bf401,
1115 0xf400d6b0,
1116 0x00f8eb0b,
1117/* 0x0464: memx_init */
1118/* 0x0466: perf_recv */
1119 0x00f800f8,
1120/* 0x0468: perf_init */
1121/* 0x046a: i2c_drive_scl */
1122 0x36b000f8,
1123 0x0d0bf400,
1124 0xf607e040,
1125 0x04bd0001,
1126/* 0x047a: i2c_drive_scl_lo */
1127 0xe44000f8,
1128 0x0001f607,
1129 0x00f804bd,
1130/* 0x0484: i2c_drive_sda */
1131 0xf40036b0,
1132 0xe0400d0b,
1133 0x0002f607,
1134 0x00f804bd,
1135/* 0x0494: i2c_drive_sda_lo */
1136 0xf607e440,
1137 0x04bd0002,
1138/* 0x049e: i2c_sense_scl */
1139 0x32f400f8,
1140 0x07c44301,
1141 0xfd0033cf,
1142 0x0bf40431,
1143 0x0131f406,
1144/* 0x04b0: i2c_sense_scl_done */
1145/* 0x04b2: i2c_sense_sda */
1146 0x32f400f8,
1147 0x07c44301,
1148 0xfd0033cf,
1149 0x0bf40432,
1150 0x0131f406,
1151/* 0x04c4: i2c_sense_sda_done */
1152/* 0x04c6: i2c_raise_scl */
1153 0x40f900f8,
1154 0x03089844,
1155 0x046a7e01,
1156/* 0x04d1: i2c_raise_scl_wait */
1157 0x03e84e00,
1158 0x00005d7e,
1159 0x00049e7e,
1160 0xb60901f4,
1161 0x1bf40142,
1162/* 0x04e5: i2c_raise_scl_done */
1163 0xf840fcef,
1164/* 0x04e9: i2c_start */
1165 0x049e7e00,
1166 0x0d11f400,
1167 0x0004b27e,
1168 0xf40611f4,
1169/* 0x04fa: i2c_start_rep */
1170 0x00032e0e,
1171 0x00046a7e,
1172 0x847e0103,
1173 0x76bb0004,
1210 0x0465b600, 1174 0x0465b600,
1211 0x659450f9, 1175 0x659450f9,
1212 0x0256bb04, 1176 0x0256bb04,
1213 0x75fd50bd, 1177 0x75fd50bd,
1214 0x7e50fc04, 1178 0x7e50fc04,
1215 0xb60004d1, 1179 0xb60004c6,
1216 0x11f40464, 1180 0x11f40464,
1217 0x13884e17, 1181/* 0x0525: i2c_start_send */
1182 0x7e00031d,
1183 0x4e000484,
1184 0x5d7e1388,
1185 0x00030000,
1186 0x00046a7e,
1187 0x7e13884e,
1188/* 0x053f: i2c_start_out */
1189 0xf800005d,
1190/* 0x0541: i2c_stop */
1191 0x7e000300,
1192 0x0300046a,
1193 0x04847e00,
1194 0x03e84e00,
1218 0x00005d7e, 1195 0x00005d7e,
1219 0x757e0003, 1196 0x6a7e0103,
1220 0x884e0004, 1197 0x884e0004,
1221 0x005d7e13, 1198 0x005d7e13,
1222/* 0x05b9: i2c_bitw_out */ 1199 0x7e010300,
1223/* 0x05bb: i2c_bitr */ 1200 0x4e000484,
1224 0x0300f800, 1201 0x5d7e1388,
1225 0x048f7e01, 1202 0x00f80000,
1226 0x03e84e00, 1203/* 0x0570: i2c_bitw */
1227 0x00005d7e, 1204 0x0004847e,
1228 0xb60076bb, 1205 0x7e03e84e,
1229 0x50f90465, 1206 0xbb00005d,
1230 0xbb046594,
1231 0x50bd0256,
1232 0xfc0475fd,
1233 0x04d17e50,
1234 0x0464b600,
1235 0x7e1a11f4,
1236 0x030004bd,
1237 0x04757e00,
1238 0x13884e00,
1239 0x00005d7e,
1240 0xf4013cf0,
1241/* 0x05fe: i2c_bitr_done */
1242 0x00f80131,
1243/* 0x0600: i2c_get_byte */
1244 0x08040005,
1245/* 0x0604: i2c_get_byte_next */
1246 0xbb0154b6,
1247 0x65b60076, 1207 0x65b60076,
1248 0x9450f904, 1208 0x9450f904,
1249 0x56bb0465, 1209 0x56bb0465,
1250 0xfd50bd02, 1210 0xfd50bd02,
1251 0x50fc0475, 1211 0x50fc0475,
1252 0x0005bb7e, 1212 0x0004c67e,
1253 0xf40464b6, 1213 0xf40464b6,
1254 0x53fd2a11, 1214 0x884e1711,
1255 0x0142b605, 1215 0x005d7e13,
1256 0x03d81bf4, 1216 0x7e000300,
1257 0x0076bb01, 1217 0x4e00046a,
1218 0x5d7e1388,
1219/* 0x05ae: i2c_bitw_out */
1220 0x00f80000,
1221/* 0x05b0: i2c_bitr */
1222 0x847e0103,
1223 0xe84e0004,
1224 0x005d7e03,
1225 0x0076bb00,
1258 0xf90465b6, 1226 0xf90465b6,
1259 0x04659450, 1227 0x04659450,
1260 0xbd0256bb, 1228 0xbd0256bb,
1261 0x0475fd50, 1229 0x0475fd50,
1262 0x7b7e50fc, 1230 0xc67e50fc,
1263 0x64b60005, 1231 0x64b60004,
1264/* 0x064d: i2c_get_byte_done */ 1232 0x1a11f404,
1265/* 0x064f: i2c_put_byte */ 1233 0x0004b27e,
1266 0x0400f804, 1234 0x6a7e0003,
1267/* 0x0651: i2c_put_byte_next */ 1235 0x884e0004,
1268 0x0142b608, 1236 0x005d7e13,
1269 0xbb3854ff, 1237 0x013cf000,
1270 0x65b60076, 1238/* 0x05f3: i2c_bitr_done */
1271 0x9450f904, 1239 0xf80131f4,
1272 0x56bb0465, 1240/* 0x05f5: i2c_get_byte */
1273 0xfd50bd02, 1241 0x04000500,
1274 0x50fc0475, 1242/* 0x05f9: i2c_get_byte_next */
1275 0x00057b7e, 1243 0x0154b608,
1276 0xf40464b6,
1277 0x46b03411,
1278 0xd81bf400,
1279 0xb60076bb, 1244 0xb60076bb,
1280 0x50f90465, 1245 0x50f90465,
1281 0xbb046594, 1246 0xbb046594,
1282 0x50bd0256, 1247 0x50bd0256,
1283 0xfc0475fd, 1248 0xfc0475fd,
1284 0x05bb7e50, 1249 0x05b07e50,
1285 0x0464b600, 1250 0x0464b600,
1286 0xbb0f11f4, 1251 0xfd2a11f4,
1287 0x36b00076, 1252 0x42b60553,
1288 0x061bf401, 1253 0xd81bf401,
1289/* 0x06a7: i2c_put_byte_done */ 1254 0x76bb0103,
1290 0xf80132f4,
1291/* 0x06a9: i2c_addr */
1292 0x0076bb00,
1293 0xf90465b6,
1294 0x04659450,
1295 0xbd0256bb,
1296 0x0475fd50,
1297 0xf47e50fc,
1298 0x64b60004,
1299 0x2911f404,
1300 0x012ec3e7,
1301 0xfd0134b6,
1302 0x76bb0553,
1303 0x0465b600, 1255 0x0465b600,
1304 0x659450f9, 1256 0x659450f9,
1305 0x0256bb04, 1257 0x0256bb04,
1306 0x75fd50bd, 1258 0x75fd50bd,
1307 0x7e50fc04, 1259 0x7e50fc04,
1308 0xb600064f, 1260 0xb6000570,
1309/* 0x06ee: i2c_addr_done */ 1261/* 0x0642: i2c_get_byte_done */
1310 0x00f80464, 1262 0x00f80464,
1311/* 0x06f0: i2c_acquire_addr */ 1263/* 0x0644: i2c_put_byte */
1312 0xb6f8cec7, 1264/* 0x0646: i2c_put_byte_next */
1313 0xe0b705e4, 1265 0x42b60804,
1314 0x00f8d014, 1266 0x3854ff01,
1315/* 0x06fc: i2c_acquire */
1316 0x0006f07e,
1317 0x0000047e,
1318 0x7e03d9f0,
1319 0xf800002e,
1320/* 0x070d: i2c_release */
1321 0x06f07e00,
1322 0x00047e00,
1323 0x03daf000,
1324 0x00002e7e,
1325/* 0x071e: i2c_recv */
1326 0x32f400f8,
1327 0xf8c1c701,
1328 0xb00214b6,
1329 0x1ff52816,
1330 0x13b80137,
1331 0x98000bd4,
1332 0x13b80032,
1333 0x98000bac,
1334 0x31f40031,
1335 0xf9d0f902,
1336 0xf1d0f9e0,
1337 0xf1000067,
1338 0x92100063,
1339 0x76bb0167,
1340 0x0465b600,
1341 0x659450f9,
1342 0x0256bb04,
1343 0x75fd50bd,
1344 0x7e50fc04,
1345 0xb60006fc,
1346 0xd0fc0464,
1347 0xf500d6b0,
1348 0x0500b01b,
1349 0x0076bb00,
1350 0xf90465b6,
1351 0x04659450,
1352 0xbd0256bb,
1353 0x0475fd50,
1354 0xa97e50fc,
1355 0x64b60006,
1356 0xcc11f504,
1357 0xe0c5c700,
1358 0xb60076bb, 1267 0xb60076bb,
1359 0x50f90465, 1268 0x50f90465,
1360 0xbb046594, 1269 0xbb046594,
1361 0x50bd0256, 1270 0x50bd0256,
1362 0xfc0475fd, 1271 0xfc0475fd,
1363 0x064f7e50, 1272 0x05707e50,
1364 0x0464b600, 1273 0x0464b600,
1365 0x00a911f5, 1274 0xb03411f4,
1366 0x76bb0105, 1275 0x1bf40046,
1276 0x0076bbd8,
1277 0xf90465b6,
1278 0x04659450,
1279 0xbd0256bb,
1280 0x0475fd50,
1281 0xb07e50fc,
1282 0x64b60005,
1283 0x0f11f404,
1284 0xb00076bb,
1285 0x1bf40136,
1286 0x0132f406,
1287/* 0x069c: i2c_put_byte_done */
1288/* 0x069e: i2c_addr */
1289 0x76bb00f8,
1367 0x0465b600, 1290 0x0465b600,
1368 0x659450f9, 1291 0x659450f9,
1369 0x0256bb04, 1292 0x0256bb04,
1370 0x75fd50bd, 1293 0x75fd50bd,
1371 0x7e50fc04, 1294 0x7e50fc04,
1372 0xb60006a9, 1295 0xb60004e9,
1373 0x11f50464, 1296 0x11f40464,
1374 0x76bb0087, 1297 0x2ec3e729,
1298 0x0134b601,
1299 0xbb0553fd,
1300 0x65b60076,
1301 0x9450f904,
1302 0x56bb0465,
1303 0xfd50bd02,
1304 0x50fc0475,
1305 0x0006447e,
1306/* 0x06e3: i2c_addr_done */
1307 0xf80464b6,
1308/* 0x06e5: i2c_acquire_addr */
1309 0xf8cec700,
1310 0xb705e4b6,
1311 0xf8d014e0,
1312/* 0x06f1: i2c_acquire */
1313 0x06e57e00,
1314 0x00047e00,
1315 0x03d9f000,
1316 0x00002e7e,
1317/* 0x0702: i2c_release */
1318 0xe57e00f8,
1319 0x047e0006,
1320 0xdaf00000,
1321 0x002e7e03,
1322/* 0x0713: i2c_recv */
1323 0xf400f800,
1324 0xc1c70132,
1325 0x0214b6f8,
1326 0xf52816b0,
1327 0xb801371f,
1328 0x000bd413,
1329 0xb8003298,
1330 0x000bac13,
1331 0xf4003198,
1332 0xd0f90231,
1333 0xd0f9e0f9,
1334 0x000067f1,
1335 0x100063f1,
1336 0xbb016792,
1337 0x65b60076,
1338 0x9450f904,
1339 0x56bb0465,
1340 0xfd50bd02,
1341 0x50fc0475,
1342 0x0006f17e,
1343 0xfc0464b6,
1344 0x00d6b0d0,
1345 0x00b01bf5,
1346 0x76bb0005,
1375 0x0465b600, 1347 0x0465b600,
1376 0x659450f9, 1348 0x659450f9,
1377 0x0256bb04, 1349 0x0256bb04,
1378 0x75fd50bd, 1350 0x75fd50bd,
1379 0x7e50fc04, 1351 0x7e50fc04,
1380 0xb6000600, 1352 0xb600069e,
1381 0x11f40464, 1353 0x11f50464,
1382 0xe05bcb67, 1354 0xc5c700cc,
1383 0xb60076bb, 1355 0x0076bbe0,
1384 0x50f90465, 1356 0xf90465b6,
1385 0xbb046594, 1357 0x04659450,
1386 0x50bd0256, 1358 0xbd0256bb,
1387 0xfc0475fd, 1359 0x0475fd50,
1388 0x054c7e50, 1360 0x447e50fc,
1389 0x0464b600, 1361 0x64b60006,
1390 0x74bd5bb2, 1362 0xa911f504,
1391/* 0x0823: i2c_recv_not_rd08 */ 1363 0xbb010500,
1392 0xb0410ef4, 1364 0x65b60076,
1393 0x1bf401d6, 1365 0x9450f904,
1394 0x7e00053b, 1366 0x56bb0465,
1395 0xf40006a9, 1367 0xfd50bd02,
1396 0xc5c73211, 1368 0x50fc0475,
1397 0x064f7ee0, 1369 0x00069e7e,
1398 0x2811f400, 1370 0xf50464b6,
1399 0xa97e0005, 1371 0xbb008711,
1372 0x65b60076,
1373 0x9450f904,
1374 0x56bb0465,
1375 0xfd50bd02,
1376 0x50fc0475,
1377 0x0005f57e,
1378 0xf40464b6,
1379 0x5bcb6711,
1380 0x0076bbe0,
1381 0xf90465b6,
1382 0x04659450,
1383 0xbd0256bb,
1384 0x0475fd50,
1385 0x417e50fc,
1386 0x64b60005,
1387 0xbd5bb204,
1388 0x410ef474,
1389/* 0x0818: i2c_recv_not_rd08 */
1390 0xf401d6b0,
1391 0x00053b1b,
1392 0x00069e7e,
1393 0xc73211f4,
1394 0x447ee0c5,
1400 0x11f40006, 1395 0x11f40006,
1401 0xe0b5c71f, 1396 0x7e000528,
1402 0x00064f7e, 1397 0xf400069e,
1403 0x7e1511f4, 1398 0xb5c71f11,
1404 0xbd00054c, 1399 0x06447ee0,
1405 0x08c5c774, 1400 0x1511f400,
1406 0xf4091bf4, 1401 0x0005417e,
1407 0x0ef40232, 1402 0xc5c774bd,
1408/* 0x0861: i2c_recv_not_wr08 */ 1403 0x091bf408,
1409/* 0x0861: i2c_recv_done */ 1404 0xf40232f4,
1410 0xf8cec703, 1405/* 0x0856: i2c_recv_not_wr08 */
1411 0x00070d7e, 1406/* 0x0856: i2c_recv_done */
1412 0xd0fce0fc, 1407 0xcec7030e,
1413 0xb20912f4, 1408 0x07027ef8,
1414 0x023f7e7c, 1409 0xfce0fc00,
1415/* 0x0875: i2c_recv_exit */ 1410 0x0912f4d0,
1416/* 0x0877: i2c_init */ 1411 0x3f7e7cb2,
1417 0xf800f800, 1412/* 0x086a: i2c_recv_exit */
1418/* 0x0879: test_recv */ 1413 0x00f80002,
1419 0x04584100, 1414/* 0x086c: i2c_init */
1420 0xb60011cf, 1415/* 0x086e: test_recv */
1421 0x58400110, 1416 0x584100f8,
1422 0x0001f604,
1423 0xe7f104bd,
1424 0xe3f1d900,
1425 0x967e134f,
1426 0x00f80001,
1427/* 0x0898: test_init */
1428 0x7e08004e,
1429 0xf8000196,
1430/* 0x08a1: idle_recv */
1431/* 0x08a3: idle */
1432 0xf400f800,
1433 0x54410031,
1434 0x0011cf04, 1417 0x0011cf04,
1435 0x400110b6, 1418 0x400110b6,
1436 0x01f60454, 1419 0x01f60458,
1437/* 0x08b7: idle_loop */ 1420 0xf104bd00,
1438 0x0104bd00, 1421 0xf1d900e7,
1439 0x0232f458, 1422 0x7e134fe3,
1440/* 0x08bc: idle_proc */ 1423 0xf8000196,
1441/* 0x08bc: idle_proc_exec */ 1424/* 0x088d: test_init */
1442 0x1eb210f9, 1425 0x08004e00,
1443 0x0002487e, 1426 0x0001967e,
1444 0x11f410fc, 1427/* 0x0896: idle_recv */
1445 0x0231f409, 1428 0x00f800f8,
1446/* 0x08cf: idle_proc_next */ 1429/* 0x0898: idle */
1447 0xb6f00ef4, 1430 0x410031f4,
1448 0x1fa65810, 1431 0x11cf0454,
1449 0xf4e81bf4, 1432 0x0110b600,
1450 0x28f4e002, 1433 0xf6045440,
1451 0xc60ef400, 1434 0x04bd0001,
1435/* 0x08ac: idle_loop */
1436 0x32f45801,
1437/* 0x08b1: idle_proc */
1438/* 0x08b1: idle_proc_exec */
1439 0xb210f902,
1440 0x02487e1e,
1441 0xf410fc00,
1442 0x31f40911,
1443 0xf00ef402,
1444/* 0x08c4: idle_proc_next */
1445 0xa65810b6,
1446 0xe81bf41f,
1447 0xf4e002f4,
1448 0x0ef40028,
1449 0x000000c6,
1450 0x00000000,
1451 0x00000000,
1452 0x00000000, 1452 0x00000000,
1453 0x00000000, 1453 0x00000000,
1454 0x00000000, 1454 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 254205cd5166..e087ce3041be 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -46,8 +46,8 @@ uint32_t nva3_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x0000054e, 49 0x00000542,
50 0x00000540, 50 0x00000534,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nva3_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x00000552, 71 0x00000546,
72 0x00000550, 72 0x00000544,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nva3_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000982, 93 0x00000976,
94 0x00000825, 94 0x00000819,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nva3_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x000009ab, 115 0x0000099f,
116 0x00000984, 116 0x00000978,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x000009b7, 137 0x000009ab,
138 0x000009b5, 138 0x000009a9,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -239,10 +239,10 @@ uint32_t nva3_pwr_data[] = {
239 0x000004b7, 239 0x000004b7,
240 0x00040003, 240 0x00040003,
241 0x00000000, 241 0x00000000,
242 0x000004df, 242 0x000004d3,
243 0x00010004, 243 0x00010004,
244 0x00000000, 244 0x00000000,
245 0x000004fc, 245 0x000004f0,
246/* 0x03ac: memx_func_tail */ 246/* 0x03ac: memx_func_tail */
247/* 0x03ac: memx_data_head */ 247/* 0x03ac: memx_data_head */
248 0x00000000, 248 0x00000000,
@@ -1198,13 +1198,10 @@ uint32_t nva3_pwr_code[] = {
1198 0x0810b601, 1198 0x0810b601,
1199 0x50f960f9, 1199 0x50f960f9,
1200 0xe0fcd0fc, 1200 0xe0fcd0fc,
1201 0xf13f21f4, 1201 0xb63f21f4,
1202 0xfd140003,
1203 0x05800506,
1204 0xb604bd00,
1205 0x1bf40242, 1202 0x1bf40242,
1206/* 0x04df: memx_func_wait */ 1203/* 0x04d3: memx_func_wait */
1207 0xf000f8dd, 1204 0xf000f8e9,
1208 0x84b62c87, 1205 0x84b62c87,
1209 0x0088cf06, 1206 0x0088cf06,
1210 0x98001e98, 1207 0x98001e98,
@@ -1212,14 +1209,14 @@ uint32_t nva3_pwr_code[] = {
1212 0x031b9802, 1209 0x031b9802,
1213 0xf41010b6, 1210 0xf41010b6,
1214 0x00f89c21, 1211 0x00f89c21,
1215/* 0x04fc: memx_func_delay */ 1212/* 0x04f0: memx_func_delay */
1216 0xb6001e98, 1213 0xb6001e98,
1217 0x21f40410, 1214 0x21f40410,
1218/* 0x0507: memx_exec */ 1215/* 0x04fb: memx_exec */
1219 0xf900f87f, 1216 0xf900f87f,
1220 0xb9d0f9e0, 1217 0xb9d0f9e0,
1221 0xb2b902c1, 1218 0xb2b902c1,
1222/* 0x0511: memx_exec_next */ 1219/* 0x0505: memx_exec_next */
1223 0x00139802, 1220 0x00139802,
1224 0x950410b6, 1221 0x950410b6,
1225 0x30f01034, 1222 0x30f01034,
@@ -1228,112 +1225,112 @@ uint32_t nva3_pwr_code[] = {
1228 0xec1ef406, 1225 0xec1ef406,
1229 0xe0fcd0fc, 1226 0xe0fcd0fc,
1230 0x02b921f5, 1227 0x02b921f5,
1231/* 0x0532: memx_info */ 1228/* 0x0526: memx_info */
1232 0xc7f100f8, 1229 0xc7f100f8,
1233 0xb7f103ac, 1230 0xb7f103ac,
1234 0x21f50800, 1231 0x21f50800,
1235 0x00f802b9, 1232 0x00f802b9,
1236/* 0x0540: memx_recv */ 1233/* 0x0534: memx_recv */
1237 0xf401d6b0, 1234 0xf401d6b0,
1238 0xd6b0c40b, 1235 0xd6b0c40b,
1239 0xe90bf400, 1236 0xe90bf400,
1240/* 0x054e: memx_init */ 1237/* 0x0542: memx_init */
1241 0x00f800f8, 1238 0x00f800f8,
1242/* 0x0550: perf_recv */ 1239/* 0x0544: perf_recv */
1243/* 0x0552: perf_init */ 1240/* 0x0546: perf_init */
1244 0x00f800f8, 1241 0x00f800f8,
1245/* 0x0554: i2c_drive_scl */ 1242/* 0x0548: i2c_drive_scl */
1246 0xf40036b0, 1243 0xf40036b0,
1247 0x07f1110b, 1244 0x07f1110b,
1248 0x04b607e0, 1245 0x04b607e0,
1249 0x0001d006, 1246 0x0001d006,
1250 0x00f804bd, 1247 0x00f804bd,
1251/* 0x0568: i2c_drive_scl_lo */ 1248/* 0x055c: i2c_drive_scl_lo */
1252 0x07e407f1, 1249 0x07e407f1,
1253 0xd00604b6, 1250 0xd00604b6,
1254 0x04bd0001, 1251 0x04bd0001,
1255/* 0x0576: i2c_drive_sda */ 1252/* 0x056a: i2c_drive_sda */
1256 0x36b000f8, 1253 0x36b000f8,
1257 0x110bf400, 1254 0x110bf400,
1258 0x07e007f1, 1255 0x07e007f1,
1259 0xd00604b6, 1256 0xd00604b6,
1260 0x04bd0002, 1257 0x04bd0002,
1261/* 0x058a: i2c_drive_sda_lo */ 1258/* 0x057e: i2c_drive_sda_lo */
1262 0x07f100f8, 1259 0x07f100f8,
1263 0x04b607e4, 1260 0x04b607e4,
1264 0x0002d006, 1261 0x0002d006,
1265 0x00f804bd, 1262 0x00f804bd,
1266/* 0x0598: i2c_sense_scl */ 1263/* 0x058c: i2c_sense_scl */
1267 0xf10132f4, 1264 0xf10132f4,
1268 0xb607c437, 1265 0xb607c437,
1269 0x33cf0634, 1266 0x33cf0634,
1270 0x0431fd00, 1267 0x0431fd00,
1271 0xf4060bf4, 1268 0xf4060bf4,
1272/* 0x05ae: i2c_sense_scl_done */ 1269/* 0x05a2: i2c_sense_scl_done */
1273 0x00f80131, 1270 0x00f80131,
1274/* 0x05b0: i2c_sense_sda */ 1271/* 0x05a4: i2c_sense_sda */
1275 0xf10132f4, 1272 0xf10132f4,
1276 0xb607c437, 1273 0xb607c437,
1277 0x33cf0634, 1274 0x33cf0634,
1278 0x0432fd00, 1275 0x0432fd00,
1279 0xf4060bf4, 1276 0xf4060bf4,
1280/* 0x05c6: i2c_sense_sda_done */ 1277/* 0x05ba: i2c_sense_sda_done */
1281 0x00f80131, 1278 0x00f80131,
1282/* 0x05c8: i2c_raise_scl */ 1279/* 0x05bc: i2c_raise_scl */
1283 0x47f140f9, 1280 0x47f140f9,
1284 0x37f00898, 1281 0x37f00898,
1285 0x5421f501, 1282 0x4821f501,
1286/* 0x05d5: i2c_raise_scl_wait */ 1283/* 0x05c9: i2c_raise_scl_wait */
1287 0xe8e7f105, 1284 0xe8e7f105,
1288 0x7f21f403, 1285 0x7f21f403,
1289 0x059821f5, 1286 0x058c21f5,
1290 0xb60901f4, 1287 0xb60901f4,
1291 0x1bf40142, 1288 0x1bf40142,
1292/* 0x05e9: i2c_raise_scl_done */ 1289/* 0x05dd: i2c_raise_scl_done */
1293 0xf840fcef, 1290 0xf840fcef,
1294/* 0x05ed: i2c_start */ 1291/* 0x05e1: i2c_start */
1295 0x9821f500, 1292 0x8c21f500,
1296 0x0d11f405, 1293 0x0d11f405,
1297 0x05b021f5, 1294 0x05a421f5,
1298 0xf40611f4, 1295 0xf40611f4,
1299/* 0x05fe: i2c_start_rep */ 1296/* 0x05f2: i2c_start_rep */
1300 0x37f0300e, 1297 0x37f0300e,
1301 0x5421f500, 1298 0x4821f500,
1302 0x0137f005, 1299 0x0137f005,
1303 0x057621f5, 1300 0x056a21f5,
1304 0xb60076bb, 1301 0xb60076bb,
1305 0x50f90465, 1302 0x50f90465,
1306 0xbb046594, 1303 0xbb046594,
1307 0x50bd0256, 1304 0x50bd0256,
1308 0xfc0475fd, 1305 0xfc0475fd,
1309 0xc821f550, 1306 0xbc21f550,
1310 0x0464b605, 1307 0x0464b605,
1311/* 0x062b: i2c_start_send */ 1308/* 0x061f: i2c_start_send */
1312 0xf01f11f4, 1309 0xf01f11f4,
1313 0x21f50037, 1310 0x21f50037,
1314 0xe7f10576, 1311 0xe7f1056a,
1315 0x21f41388, 1312 0x21f41388,
1316 0x0037f07f, 1313 0x0037f07f,
1317 0x055421f5, 1314 0x054821f5,
1318 0x1388e7f1, 1315 0x1388e7f1,
1319/* 0x0647: i2c_start_out */ 1316/* 0x063b: i2c_start_out */
1320 0xf87f21f4, 1317 0xf87f21f4,
1321/* 0x0649: i2c_stop */ 1318/* 0x063d: i2c_stop */
1322 0x0037f000, 1319 0x0037f000,
1323 0x055421f5, 1320 0x054821f5,
1324 0xf50037f0, 1321 0xf50037f0,
1325 0xf1057621, 1322 0xf1056a21,
1326 0xf403e8e7, 1323 0xf403e8e7,
1327 0x37f07f21, 1324 0x37f07f21,
1328 0x5421f501, 1325 0x4821f501,
1329 0x88e7f105, 1326 0x88e7f105,
1330 0x7f21f413, 1327 0x7f21f413,
1331 0xf50137f0, 1328 0xf50137f0,
1332 0xf1057621, 1329 0xf1056a21,
1333 0xf41388e7, 1330 0xf41388e7,
1334 0x00f87f21, 1331 0x00f87f21,
1335/* 0x067c: i2c_bitw */ 1332/* 0x0670: i2c_bitw */
1336 0x057621f5, 1333 0x056a21f5,
1337 0x03e8e7f1, 1334 0x03e8e7f1,
1338 0xbb7f21f4, 1335 0xbb7f21f4,
1339 0x65b60076, 1336 0x65b60076,
@@ -1341,18 +1338,18 @@ uint32_t nva3_pwr_code[] = {
1341 0x56bb0465, 1338 0x56bb0465,
1342 0xfd50bd02, 1339 0xfd50bd02,
1343 0x50fc0475, 1340 0x50fc0475,
1344 0x05c821f5, 1341 0x05bc21f5,
1345 0xf40464b6, 1342 0xf40464b6,
1346 0xe7f11811, 1343 0xe7f11811,
1347 0x21f41388, 1344 0x21f41388,
1348 0x0037f07f, 1345 0x0037f07f,
1349 0x055421f5, 1346 0x054821f5,
1350 0x1388e7f1, 1347 0x1388e7f1,
1351/* 0x06bb: i2c_bitw_out */ 1348/* 0x06af: i2c_bitw_out */
1352 0xf87f21f4, 1349 0xf87f21f4,
1353/* 0x06bd: i2c_bitr */ 1350/* 0x06b1: i2c_bitr */
1354 0x0137f000, 1351 0x0137f000,
1355 0x057621f5, 1352 0x056a21f5,
1356 0x03e8e7f1, 1353 0x03e8e7f1,
1357 0xbb7f21f4, 1354 0xbb7f21f4,
1358 0x65b60076, 1355 0x65b60076,
@@ -1360,19 +1357,19 @@ uint32_t nva3_pwr_code[] = {
1360 0x56bb0465, 1357 0x56bb0465,
1361 0xfd50bd02, 1358 0xfd50bd02,
1362 0x50fc0475, 1359 0x50fc0475,
1363 0x05c821f5, 1360 0x05bc21f5,
1364 0xf40464b6, 1361 0xf40464b6,
1365 0x21f51b11, 1362 0x21f51b11,
1366 0x37f005b0, 1363 0x37f005a4,
1367 0x5421f500, 1364 0x4821f500,
1368 0x88e7f105, 1365 0x88e7f105,
1369 0x7f21f413, 1366 0x7f21f413,
1370 0xf4013cf0, 1367 0xf4013cf0,
1371/* 0x0702: i2c_bitr_done */ 1368/* 0x06f6: i2c_bitr_done */
1372 0x00f80131, 1369 0x00f80131,
1373/* 0x0704: i2c_get_byte */ 1370/* 0x06f8: i2c_get_byte */
1374 0xf00057f0, 1371 0xf00057f0,
1375/* 0x070a: i2c_get_byte_next */ 1372/* 0x06fe: i2c_get_byte_next */
1376 0x54b60847, 1373 0x54b60847,
1377 0x0076bb01, 1374 0x0076bb01,
1378 0xf90465b6, 1375 0xf90465b6,
@@ -1380,7 +1377,7 @@ uint32_t nva3_pwr_code[] = {
1380 0xbd0256bb, 1377 0xbd0256bb,
1381 0x0475fd50, 1378 0x0475fd50,
1382 0x21f550fc, 1379 0x21f550fc,
1383 0x64b606bd, 1380 0x64b606b1,
1384 0x2b11f404, 1381 0x2b11f404,
1385 0xb60553fd, 1382 0xb60553fd,
1386 0x1bf40142, 1383 0x1bf40142,
@@ -1390,12 +1387,12 @@ uint32_t nva3_pwr_code[] = {
1390 0xbb046594, 1387 0xbb046594,
1391 0x50bd0256, 1388 0x50bd0256,
1392 0xfc0475fd, 1389 0xfc0475fd,
1393 0x7c21f550, 1390 0x7021f550,
1394 0x0464b606, 1391 0x0464b606,
1395/* 0x0754: i2c_get_byte_done */ 1392/* 0x0748: i2c_get_byte_done */
1396/* 0x0756: i2c_put_byte */ 1393/* 0x074a: i2c_put_byte */
1397 0x47f000f8, 1394 0x47f000f8,
1398/* 0x0759: i2c_put_byte_next */ 1395/* 0x074d: i2c_put_byte_next */
1399 0x0142b608, 1396 0x0142b608,
1400 0xbb3854ff, 1397 0xbb3854ff,
1401 0x65b60076, 1398 0x65b60076,
@@ -1403,7 +1400,7 @@ uint32_t nva3_pwr_code[] = {
1403 0x56bb0465, 1400 0x56bb0465,
1404 0xfd50bd02, 1401 0xfd50bd02,
1405 0x50fc0475, 1402 0x50fc0475,
1406 0x067c21f5, 1403 0x067021f5,
1407 0xf40464b6, 1404 0xf40464b6,
1408 0x46b03411, 1405 0x46b03411,
1409 0xd81bf400, 1406 0xd81bf400,
@@ -1412,21 +1409,21 @@ uint32_t nva3_pwr_code[] = {
1412 0xbb046594, 1409 0xbb046594,
1413 0x50bd0256, 1410 0x50bd0256,
1414 0xfc0475fd, 1411 0xfc0475fd,
1415 0xbd21f550, 1412 0xb121f550,
1416 0x0464b606, 1413 0x0464b606,
1417 0xbb0f11f4, 1414 0xbb0f11f4,
1418 0x36b00076, 1415 0x36b00076,
1419 0x061bf401, 1416 0x061bf401,
1420/* 0x07af: i2c_put_byte_done */ 1417/* 0x07a3: i2c_put_byte_done */
1421 0xf80132f4, 1418 0xf80132f4,
1422/* 0x07b1: i2c_addr */ 1419/* 0x07a5: i2c_addr */
1423 0x0076bb00, 1420 0x0076bb00,
1424 0xf90465b6, 1421 0xf90465b6,
1425 0x04659450, 1422 0x04659450,
1426 0xbd0256bb, 1423 0xbd0256bb,
1427 0x0475fd50, 1424 0x0475fd50,
1428 0x21f550fc, 1425 0x21f550fc,
1429 0x64b605ed, 1426 0x64b605e1,
1430 0x2911f404, 1427 0x2911f404,
1431 0x012ec3e7, 1428 0x012ec3e7,
1432 0xfd0134b6, 1429 0xfd0134b6,
@@ -1436,24 +1433,24 @@ uint32_t nva3_pwr_code[] = {
1436 0x0256bb04, 1433 0x0256bb04,
1437 0x75fd50bd, 1434 0x75fd50bd,
1438 0xf550fc04, 1435 0xf550fc04,
1439 0xb6075621, 1436 0xb6074a21,
1440/* 0x07f6: i2c_addr_done */ 1437/* 0x07ea: i2c_addr_done */
1441 0x00f80464, 1438 0x00f80464,
1442/* 0x07f8: i2c_acquire_addr */ 1439/* 0x07ec: i2c_acquire_addr */
1443 0xb6f8cec7, 1440 0xb6f8cec7,
1444 0xe0b702e4, 1441 0xe0b702e4,
1445 0xee980bfc, 1442 0xee980bfc,
1446/* 0x0807: i2c_acquire */ 1443/* 0x07fb: i2c_acquire */
1447 0xf500f800, 1444 0xf500f800,
1448 0xf407f821, 1445 0xf407ec21,
1449 0xd9f00421, 1446 0xd9f00421,
1450 0x3f21f403, 1447 0x3f21f403,
1451/* 0x0816: i2c_release */ 1448/* 0x080a: i2c_release */
1452 0x21f500f8, 1449 0x21f500f8,
1453 0x21f407f8, 1450 0x21f407ec,
1454 0x03daf004, 1451 0x03daf004,
1455 0xf83f21f4, 1452 0xf83f21f4,
1456/* 0x0825: i2c_recv */ 1453/* 0x0819: i2c_recv */
1457 0x0132f400, 1454 0x0132f400,
1458 0xb6f8c1c7, 1455 0xb6f8c1c7,
1459 0x16b00214, 1456 0x16b00214,
@@ -1472,7 +1469,7 @@ uint32_t nva3_pwr_code[] = {
1472 0x56bb0465, 1469 0x56bb0465,
1473 0xfd50bd02, 1470 0xfd50bd02,
1474 0x50fc0475, 1471 0x50fc0475,
1475 0x080721f5, 1472 0x07fb21f5,
1476 0xfc0464b6, 1473 0xfc0464b6,
1477 0x00d6b0d0, 1474 0x00d6b0d0,
1478 0x00b31bf5, 1475 0x00b31bf5,
@@ -1482,7 +1479,7 @@ uint32_t nva3_pwr_code[] = {
1482 0x56bb0465, 1479 0x56bb0465,
1483 0xfd50bd02, 1480 0xfd50bd02,
1484 0x50fc0475, 1481 0x50fc0475,
1485 0x07b121f5, 1482 0x07a521f5,
1486 0xf50464b6, 1483 0xf50464b6,
1487 0xc700d011, 1484 0xc700d011,
1488 0x76bbe0c5, 1485 0x76bbe0c5,
@@ -1491,7 +1488,7 @@ uint32_t nva3_pwr_code[] = {
1491 0x0256bb04, 1488 0x0256bb04,
1492 0x75fd50bd, 1489 0x75fd50bd,
1493 0xf550fc04, 1490 0xf550fc04,
1494 0xb6075621, 1491 0xb6074a21,
1495 0x11f50464, 1492 0x11f50464,
1496 0x57f000ad, 1493 0x57f000ad,
1497 0x0076bb01, 1494 0x0076bb01,
@@ -1500,7 +1497,7 @@ uint32_t nva3_pwr_code[] = {
1500 0xbd0256bb, 1497 0xbd0256bb,
1501 0x0475fd50, 1498 0x0475fd50,
1502 0x21f550fc, 1499 0x21f550fc,
1503 0x64b607b1, 1500 0x64b607a5,
1504 0x8a11f504, 1501 0x8a11f504,
1505 0x0076bb00, 1502 0x0076bb00,
1506 0xf90465b6, 1503 0xf90465b6,
@@ -1508,7 +1505,7 @@ uint32_t nva3_pwr_code[] = {
1508 0xbd0256bb, 1505 0xbd0256bb,
1509 0x0475fd50, 1506 0x0475fd50,
1510 0x21f550fc, 1507 0x21f550fc,
1511 0x64b60704, 1508 0x64b606f8,
1512 0x6a11f404, 1509 0x6a11f404,
1513 0xbbe05bcb, 1510 0xbbe05bcb,
1514 0x65b60076, 1511 0x65b60076,
@@ -1516,38 +1513,38 @@ uint32_t nva3_pwr_code[] = {
1516 0x56bb0465, 1513 0x56bb0465,
1517 0xfd50bd02, 1514 0xfd50bd02,
1518 0x50fc0475, 1515 0x50fc0475,
1519 0x064921f5, 1516 0x063d21f5,
1520 0xb90464b6, 1517 0xb90464b6,
1521 0x74bd025b, 1518 0x74bd025b,
1522/* 0x092b: i2c_recv_not_rd08 */ 1519/* 0x091f: i2c_recv_not_rd08 */
1523 0xb0430ef4, 1520 0xb0430ef4,
1524 0x1bf401d6, 1521 0x1bf401d6,
1525 0x0057f03d, 1522 0x0057f03d,
1526 0x07b121f5, 1523 0x07a521f5,
1527 0xc73311f4, 1524 0xc73311f4,
1528 0x21f5e0c5, 1525 0x21f5e0c5,
1529 0x11f40756, 1526 0x11f4074a,
1530 0x0057f029, 1527 0x0057f029,
1531 0x07b121f5, 1528 0x07a521f5,
1532 0xc71f11f4, 1529 0xc71f11f4,
1533 0x21f5e0b5, 1530 0x21f5e0b5,
1534 0x11f40756, 1531 0x11f4074a,
1535 0x4921f515, 1532 0x3d21f515,
1536 0xc774bd06, 1533 0xc774bd06,
1537 0x1bf408c5, 1534 0x1bf408c5,
1538 0x0232f409, 1535 0x0232f409,
1539/* 0x096b: i2c_recv_not_wr08 */ 1536/* 0x095f: i2c_recv_not_wr08 */
1540/* 0x096b: i2c_recv_done */ 1537/* 0x095f: i2c_recv_done */
1541 0xc7030ef4, 1538 0xc7030ef4,
1542 0x21f5f8ce, 1539 0x21f5f8ce,
1543 0xe0fc0816, 1540 0xe0fc080a,
1544 0x12f4d0fc, 1541 0x12f4d0fc,
1545 0x027cb90a, 1542 0x027cb90a,
1546 0x02b921f5, 1543 0x02b921f5,
1547/* 0x0980: i2c_recv_exit */ 1544/* 0x0974: i2c_recv_exit */
1548/* 0x0982: i2c_init */ 1545/* 0x0976: i2c_init */
1549 0x00f800f8, 1546 0x00f800f8,
1550/* 0x0984: test_recv */ 1547/* 0x0978: test_recv */
1551 0x05d817f1, 1548 0x05d817f1,
1552 0xcf0614b6, 1549 0xcf0614b6,
1553 0x10b60011, 1550 0x10b60011,
@@ -1557,12 +1554,12 @@ uint32_t nva3_pwr_code[] = {
1557 0x00e7f104, 1554 0x00e7f104,
1558 0x4fe3f1d9, 1555 0x4fe3f1d9,
1559 0xf521f513, 1556 0xf521f513,
1560/* 0x09ab: test_init */ 1557/* 0x099f: test_init */
1561 0xf100f801, 1558 0xf100f801,
1562 0xf50800e7, 1559 0xf50800e7,
1563 0xf801f521, 1560 0xf801f521,
1564/* 0x09b5: idle_recv */ 1561/* 0x09a9: idle_recv */
1565/* 0x09b7: idle */ 1562/* 0x09ab: idle */
1566 0xf400f800, 1563 0xf400f800,
1567 0x17f10031, 1564 0x17f10031,
1568 0x14b605d4, 1565 0x14b605d4,
@@ -1570,20 +1567,23 @@ uint32_t nva3_pwr_code[] = {
1570 0xf10110b6, 1567 0xf10110b6,
1571 0xb605d407, 1568 0xb605d407,
1572 0x01d00604, 1569 0x01d00604,
1573/* 0x09d3: idle_loop */ 1570/* 0x09c7: idle_loop */
1574 0xf004bd00, 1571 0xf004bd00,
1575 0x32f45817, 1572 0x32f45817,
1576/* 0x09d9: idle_proc */ 1573/* 0x09cd: idle_proc */
1577/* 0x09d9: idle_proc_exec */ 1574/* 0x09cd: idle_proc_exec */
1578 0xb910f902, 1575 0xb910f902,
1579 0x21f5021e, 1576 0x21f5021e,
1580 0x10fc02c2, 1577 0x10fc02c2,
1581 0xf40911f4, 1578 0xf40911f4,
1582 0x0ef40231, 1579 0x0ef40231,
1583/* 0x09ed: idle_proc_next */ 1580/* 0x09e1: idle_proc_next */
1584 0x5810b6ef, 1581 0x5810b6ef,
1585 0xf4061fb8, 1582 0xf4061fb8,
1586 0x02f4e61b, 1583 0x02f4e61b,
1587 0x0028f4dd, 1584 0x0028f4dd,
1588 0x00bb0ef4, 1585 0x00bb0ef4,
1586 0x00000000,
1587 0x00000000,
1588 0x00000000,
1589}; 1589};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index 7ac87405d01b..0773ff0e3dc3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvc0_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x0000054e, 49 0x00000542,
50 0x00000540, 50 0x00000534,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvc0_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x00000552, 71 0x00000546,
72 0x00000550, 72 0x00000544,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvc0_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000982, 93 0x00000976,
94 0x00000825, 94 0x00000819,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvc0_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x000009ab, 115 0x0000099f,
116 0x00000984, 116 0x00000978,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x000009b7, 137 0x000009ab,
138 0x000009b5, 138 0x000009a9,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -239,10 +239,10 @@ uint32_t nvc0_pwr_data[] = {
239 0x000004b7, 239 0x000004b7,
240 0x00040003, 240 0x00040003,
241 0x00000000, 241 0x00000000,
242 0x000004df, 242 0x000004d3,
243 0x00010004, 243 0x00010004,
244 0x00000000, 244 0x00000000,
245 0x000004fc, 245 0x000004f0,
246/* 0x03ac: memx_func_tail */ 246/* 0x03ac: memx_func_tail */
247/* 0x03ac: memx_data_head */ 247/* 0x03ac: memx_data_head */
248 0x00000000, 248 0x00000000,
@@ -1198,13 +1198,10 @@ uint32_t nvc0_pwr_code[] = {
1198 0x0810b601, 1198 0x0810b601,
1199 0x50f960f9, 1199 0x50f960f9,
1200 0xe0fcd0fc, 1200 0xe0fcd0fc,
1201 0xf13f21f4, 1201 0xb63f21f4,
1202 0xfd140003,
1203 0x05800506,
1204 0xb604bd00,
1205 0x1bf40242, 1202 0x1bf40242,
1206/* 0x04df: memx_func_wait */ 1203/* 0x04d3: memx_func_wait */
1207 0xf000f8dd, 1204 0xf000f8e9,
1208 0x84b62c87, 1205 0x84b62c87,
1209 0x0088cf06, 1206 0x0088cf06,
1210 0x98001e98, 1207 0x98001e98,
@@ -1212,14 +1209,14 @@ uint32_t nvc0_pwr_code[] = {
1212 0x031b9802, 1209 0x031b9802,
1213 0xf41010b6, 1210 0xf41010b6,
1214 0x00f89c21, 1211 0x00f89c21,
1215/* 0x04fc: memx_func_delay */ 1212/* 0x04f0: memx_func_delay */
1216 0xb6001e98, 1213 0xb6001e98,
1217 0x21f40410, 1214 0x21f40410,
1218/* 0x0507: memx_exec */ 1215/* 0x04fb: memx_exec */
1219 0xf900f87f, 1216 0xf900f87f,
1220 0xb9d0f9e0, 1217 0xb9d0f9e0,
1221 0xb2b902c1, 1218 0xb2b902c1,
1222/* 0x0511: memx_exec_next */ 1219/* 0x0505: memx_exec_next */
1223 0x00139802, 1220 0x00139802,
1224 0x950410b6, 1221 0x950410b6,
1225 0x30f01034, 1222 0x30f01034,
@@ -1228,112 +1225,112 @@ uint32_t nvc0_pwr_code[] = {
1228 0xec1ef406, 1225 0xec1ef406,
1229 0xe0fcd0fc, 1226 0xe0fcd0fc,
1230 0x02b921f5, 1227 0x02b921f5,
1231/* 0x0532: memx_info */ 1228/* 0x0526: memx_info */
1232 0xc7f100f8, 1229 0xc7f100f8,
1233 0xb7f103ac, 1230 0xb7f103ac,
1234 0x21f50800, 1231 0x21f50800,
1235 0x00f802b9, 1232 0x00f802b9,
1236/* 0x0540: memx_recv */ 1233/* 0x0534: memx_recv */
1237 0xf401d6b0, 1234 0xf401d6b0,
1238 0xd6b0c40b, 1235 0xd6b0c40b,
1239 0xe90bf400, 1236 0xe90bf400,
1240/* 0x054e: memx_init */ 1237/* 0x0542: memx_init */
1241 0x00f800f8, 1238 0x00f800f8,
1242/* 0x0550: perf_recv */ 1239/* 0x0544: perf_recv */
1243/* 0x0552: perf_init */ 1240/* 0x0546: perf_init */
1244 0x00f800f8, 1241 0x00f800f8,
1245/* 0x0554: i2c_drive_scl */ 1242/* 0x0548: i2c_drive_scl */
1246 0xf40036b0, 1243 0xf40036b0,
1247 0x07f1110b, 1244 0x07f1110b,
1248 0x04b607e0, 1245 0x04b607e0,
1249 0x0001d006, 1246 0x0001d006,
1250 0x00f804bd, 1247 0x00f804bd,
1251/* 0x0568: i2c_drive_scl_lo */ 1248/* 0x055c: i2c_drive_scl_lo */
1252 0x07e407f1, 1249 0x07e407f1,
1253 0xd00604b6, 1250 0xd00604b6,
1254 0x04bd0001, 1251 0x04bd0001,
1255/* 0x0576: i2c_drive_sda */ 1252/* 0x056a: i2c_drive_sda */
1256 0x36b000f8, 1253 0x36b000f8,
1257 0x110bf400, 1254 0x110bf400,
1258 0x07e007f1, 1255 0x07e007f1,
1259 0xd00604b6, 1256 0xd00604b6,
1260 0x04bd0002, 1257 0x04bd0002,
1261/* 0x058a: i2c_drive_sda_lo */ 1258/* 0x057e: i2c_drive_sda_lo */
1262 0x07f100f8, 1259 0x07f100f8,
1263 0x04b607e4, 1260 0x04b607e4,
1264 0x0002d006, 1261 0x0002d006,
1265 0x00f804bd, 1262 0x00f804bd,
1266/* 0x0598: i2c_sense_scl */ 1263/* 0x058c: i2c_sense_scl */
1267 0xf10132f4, 1264 0xf10132f4,
1268 0xb607c437, 1265 0xb607c437,
1269 0x33cf0634, 1266 0x33cf0634,
1270 0x0431fd00, 1267 0x0431fd00,
1271 0xf4060bf4, 1268 0xf4060bf4,
1272/* 0x05ae: i2c_sense_scl_done */ 1269/* 0x05a2: i2c_sense_scl_done */
1273 0x00f80131, 1270 0x00f80131,
1274/* 0x05b0: i2c_sense_sda */ 1271/* 0x05a4: i2c_sense_sda */
1275 0xf10132f4, 1272 0xf10132f4,
1276 0xb607c437, 1273 0xb607c437,
1277 0x33cf0634, 1274 0x33cf0634,
1278 0x0432fd00, 1275 0x0432fd00,
1279 0xf4060bf4, 1276 0xf4060bf4,
1280/* 0x05c6: i2c_sense_sda_done */ 1277/* 0x05ba: i2c_sense_sda_done */
1281 0x00f80131, 1278 0x00f80131,
1282/* 0x05c8: i2c_raise_scl */ 1279/* 0x05bc: i2c_raise_scl */
1283 0x47f140f9, 1280 0x47f140f9,
1284 0x37f00898, 1281 0x37f00898,
1285 0x5421f501, 1282 0x4821f501,
1286/* 0x05d5: i2c_raise_scl_wait */ 1283/* 0x05c9: i2c_raise_scl_wait */
1287 0xe8e7f105, 1284 0xe8e7f105,
1288 0x7f21f403, 1285 0x7f21f403,
1289 0x059821f5, 1286 0x058c21f5,
1290 0xb60901f4, 1287 0xb60901f4,
1291 0x1bf40142, 1288 0x1bf40142,
1292/* 0x05e9: i2c_raise_scl_done */ 1289/* 0x05dd: i2c_raise_scl_done */
1293 0xf840fcef, 1290 0xf840fcef,
1294/* 0x05ed: i2c_start */ 1291/* 0x05e1: i2c_start */
1295 0x9821f500, 1292 0x8c21f500,
1296 0x0d11f405, 1293 0x0d11f405,
1297 0x05b021f5, 1294 0x05a421f5,
1298 0xf40611f4, 1295 0xf40611f4,
1299/* 0x05fe: i2c_start_rep */ 1296/* 0x05f2: i2c_start_rep */
1300 0x37f0300e, 1297 0x37f0300e,
1301 0x5421f500, 1298 0x4821f500,
1302 0x0137f005, 1299 0x0137f005,
1303 0x057621f5, 1300 0x056a21f5,
1304 0xb60076bb, 1301 0xb60076bb,
1305 0x50f90465, 1302 0x50f90465,
1306 0xbb046594, 1303 0xbb046594,
1307 0x50bd0256, 1304 0x50bd0256,
1308 0xfc0475fd, 1305 0xfc0475fd,
1309 0xc821f550, 1306 0xbc21f550,
1310 0x0464b605, 1307 0x0464b605,
1311/* 0x062b: i2c_start_send */ 1308/* 0x061f: i2c_start_send */
1312 0xf01f11f4, 1309 0xf01f11f4,
1313 0x21f50037, 1310 0x21f50037,
1314 0xe7f10576, 1311 0xe7f1056a,
1315 0x21f41388, 1312 0x21f41388,
1316 0x0037f07f, 1313 0x0037f07f,
1317 0x055421f5, 1314 0x054821f5,
1318 0x1388e7f1, 1315 0x1388e7f1,
1319/* 0x0647: i2c_start_out */ 1316/* 0x063b: i2c_start_out */
1320 0xf87f21f4, 1317 0xf87f21f4,
1321/* 0x0649: i2c_stop */ 1318/* 0x063d: i2c_stop */
1322 0x0037f000, 1319 0x0037f000,
1323 0x055421f5, 1320 0x054821f5,
1324 0xf50037f0, 1321 0xf50037f0,
1325 0xf1057621, 1322 0xf1056a21,
1326 0xf403e8e7, 1323 0xf403e8e7,
1327 0x37f07f21, 1324 0x37f07f21,
1328 0x5421f501, 1325 0x4821f501,
1329 0x88e7f105, 1326 0x88e7f105,
1330 0x7f21f413, 1327 0x7f21f413,
1331 0xf50137f0, 1328 0xf50137f0,
1332 0xf1057621, 1329 0xf1056a21,
1333 0xf41388e7, 1330 0xf41388e7,
1334 0x00f87f21, 1331 0x00f87f21,
1335/* 0x067c: i2c_bitw */ 1332/* 0x0670: i2c_bitw */
1336 0x057621f5, 1333 0x056a21f5,
1337 0x03e8e7f1, 1334 0x03e8e7f1,
1338 0xbb7f21f4, 1335 0xbb7f21f4,
1339 0x65b60076, 1336 0x65b60076,
@@ -1341,18 +1338,18 @@ uint32_t nvc0_pwr_code[] = {
1341 0x56bb0465, 1338 0x56bb0465,
1342 0xfd50bd02, 1339 0xfd50bd02,
1343 0x50fc0475, 1340 0x50fc0475,
1344 0x05c821f5, 1341 0x05bc21f5,
1345 0xf40464b6, 1342 0xf40464b6,
1346 0xe7f11811, 1343 0xe7f11811,
1347 0x21f41388, 1344 0x21f41388,
1348 0x0037f07f, 1345 0x0037f07f,
1349 0x055421f5, 1346 0x054821f5,
1350 0x1388e7f1, 1347 0x1388e7f1,
1351/* 0x06bb: i2c_bitw_out */ 1348/* 0x06af: i2c_bitw_out */
1352 0xf87f21f4, 1349 0xf87f21f4,
1353/* 0x06bd: i2c_bitr */ 1350/* 0x06b1: i2c_bitr */
1354 0x0137f000, 1351 0x0137f000,
1355 0x057621f5, 1352 0x056a21f5,
1356 0x03e8e7f1, 1353 0x03e8e7f1,
1357 0xbb7f21f4, 1354 0xbb7f21f4,
1358 0x65b60076, 1355 0x65b60076,
@@ -1360,19 +1357,19 @@ uint32_t nvc0_pwr_code[] = {
1360 0x56bb0465, 1357 0x56bb0465,
1361 0xfd50bd02, 1358 0xfd50bd02,
1362 0x50fc0475, 1359 0x50fc0475,
1363 0x05c821f5, 1360 0x05bc21f5,
1364 0xf40464b6, 1361 0xf40464b6,
1365 0x21f51b11, 1362 0x21f51b11,
1366 0x37f005b0, 1363 0x37f005a4,
1367 0x5421f500, 1364 0x4821f500,
1368 0x88e7f105, 1365 0x88e7f105,
1369 0x7f21f413, 1366 0x7f21f413,
1370 0xf4013cf0, 1367 0xf4013cf0,
1371/* 0x0702: i2c_bitr_done */ 1368/* 0x06f6: i2c_bitr_done */
1372 0x00f80131, 1369 0x00f80131,
1373/* 0x0704: i2c_get_byte */ 1370/* 0x06f8: i2c_get_byte */
1374 0xf00057f0, 1371 0xf00057f0,
1375/* 0x070a: i2c_get_byte_next */ 1372/* 0x06fe: i2c_get_byte_next */
1376 0x54b60847, 1373 0x54b60847,
1377 0x0076bb01, 1374 0x0076bb01,
1378 0xf90465b6, 1375 0xf90465b6,
@@ -1380,7 +1377,7 @@ uint32_t nvc0_pwr_code[] = {
1380 0xbd0256bb, 1377 0xbd0256bb,
1381 0x0475fd50, 1378 0x0475fd50,
1382 0x21f550fc, 1379 0x21f550fc,
1383 0x64b606bd, 1380 0x64b606b1,
1384 0x2b11f404, 1381 0x2b11f404,
1385 0xb60553fd, 1382 0xb60553fd,
1386 0x1bf40142, 1383 0x1bf40142,
@@ -1390,12 +1387,12 @@ uint32_t nvc0_pwr_code[] = {
1390 0xbb046594, 1387 0xbb046594,
1391 0x50bd0256, 1388 0x50bd0256,
1392 0xfc0475fd, 1389 0xfc0475fd,
1393 0x7c21f550, 1390 0x7021f550,
1394 0x0464b606, 1391 0x0464b606,
1395/* 0x0754: i2c_get_byte_done */ 1392/* 0x0748: i2c_get_byte_done */
1396/* 0x0756: i2c_put_byte */ 1393/* 0x074a: i2c_put_byte */
1397 0x47f000f8, 1394 0x47f000f8,
1398/* 0x0759: i2c_put_byte_next */ 1395/* 0x074d: i2c_put_byte_next */
1399 0x0142b608, 1396 0x0142b608,
1400 0xbb3854ff, 1397 0xbb3854ff,
1401 0x65b60076, 1398 0x65b60076,
@@ -1403,7 +1400,7 @@ uint32_t nvc0_pwr_code[] = {
1403 0x56bb0465, 1400 0x56bb0465,
1404 0xfd50bd02, 1401 0xfd50bd02,
1405 0x50fc0475, 1402 0x50fc0475,
1406 0x067c21f5, 1403 0x067021f5,
1407 0xf40464b6, 1404 0xf40464b6,
1408 0x46b03411, 1405 0x46b03411,
1409 0xd81bf400, 1406 0xd81bf400,
@@ -1412,21 +1409,21 @@ uint32_t nvc0_pwr_code[] = {
1412 0xbb046594, 1409 0xbb046594,
1413 0x50bd0256, 1410 0x50bd0256,
1414 0xfc0475fd, 1411 0xfc0475fd,
1415 0xbd21f550, 1412 0xb121f550,
1416 0x0464b606, 1413 0x0464b606,
1417 0xbb0f11f4, 1414 0xbb0f11f4,
1418 0x36b00076, 1415 0x36b00076,
1419 0x061bf401, 1416 0x061bf401,
1420/* 0x07af: i2c_put_byte_done */ 1417/* 0x07a3: i2c_put_byte_done */
1421 0xf80132f4, 1418 0xf80132f4,
1422/* 0x07b1: i2c_addr */ 1419/* 0x07a5: i2c_addr */
1423 0x0076bb00, 1420 0x0076bb00,
1424 0xf90465b6, 1421 0xf90465b6,
1425 0x04659450, 1422 0x04659450,
1426 0xbd0256bb, 1423 0xbd0256bb,
1427 0x0475fd50, 1424 0x0475fd50,
1428 0x21f550fc, 1425 0x21f550fc,
1429 0x64b605ed, 1426 0x64b605e1,
1430 0x2911f404, 1427 0x2911f404,
1431 0x012ec3e7, 1428 0x012ec3e7,
1432 0xfd0134b6, 1429 0xfd0134b6,
@@ -1436,24 +1433,24 @@ uint32_t nvc0_pwr_code[] = {
1436 0x0256bb04, 1433 0x0256bb04,
1437 0x75fd50bd, 1434 0x75fd50bd,
1438 0xf550fc04, 1435 0xf550fc04,
1439 0xb6075621, 1436 0xb6074a21,
1440/* 0x07f6: i2c_addr_done */ 1437/* 0x07ea: i2c_addr_done */
1441 0x00f80464, 1438 0x00f80464,
1442/* 0x07f8: i2c_acquire_addr */ 1439/* 0x07ec: i2c_acquire_addr */
1443 0xb6f8cec7, 1440 0xb6f8cec7,
1444 0xe0b702e4, 1441 0xe0b702e4,
1445 0xee980bfc, 1442 0xee980bfc,
1446/* 0x0807: i2c_acquire */ 1443/* 0x07fb: i2c_acquire */
1447 0xf500f800, 1444 0xf500f800,
1448 0xf407f821, 1445 0xf407ec21,
1449 0xd9f00421, 1446 0xd9f00421,
1450 0x3f21f403, 1447 0x3f21f403,
1451/* 0x0816: i2c_release */ 1448/* 0x080a: i2c_release */
1452 0x21f500f8, 1449 0x21f500f8,
1453 0x21f407f8, 1450 0x21f407ec,
1454 0x03daf004, 1451 0x03daf004,
1455 0xf83f21f4, 1452 0xf83f21f4,
1456/* 0x0825: i2c_recv */ 1453/* 0x0819: i2c_recv */
1457 0x0132f400, 1454 0x0132f400,
1458 0xb6f8c1c7, 1455 0xb6f8c1c7,
1459 0x16b00214, 1456 0x16b00214,
@@ -1472,7 +1469,7 @@ uint32_t nvc0_pwr_code[] = {
1472 0x56bb0465, 1469 0x56bb0465,
1473 0xfd50bd02, 1470 0xfd50bd02,
1474 0x50fc0475, 1471 0x50fc0475,
1475 0x080721f5, 1472 0x07fb21f5,
1476 0xfc0464b6, 1473 0xfc0464b6,
1477 0x00d6b0d0, 1474 0x00d6b0d0,
1478 0x00b31bf5, 1475 0x00b31bf5,
@@ -1482,7 +1479,7 @@ uint32_t nvc0_pwr_code[] = {
1482 0x56bb0465, 1479 0x56bb0465,
1483 0xfd50bd02, 1480 0xfd50bd02,
1484 0x50fc0475, 1481 0x50fc0475,
1485 0x07b121f5, 1482 0x07a521f5,
1486 0xf50464b6, 1483 0xf50464b6,
1487 0xc700d011, 1484 0xc700d011,
1488 0x76bbe0c5, 1485 0x76bbe0c5,
@@ -1491,7 +1488,7 @@ uint32_t nvc0_pwr_code[] = {
1491 0x0256bb04, 1488 0x0256bb04,
1492 0x75fd50bd, 1489 0x75fd50bd,
1493 0xf550fc04, 1490 0xf550fc04,
1494 0xb6075621, 1491 0xb6074a21,
1495 0x11f50464, 1492 0x11f50464,
1496 0x57f000ad, 1493 0x57f000ad,
1497 0x0076bb01, 1494 0x0076bb01,
@@ -1500,7 +1497,7 @@ uint32_t nvc0_pwr_code[] = {
1500 0xbd0256bb, 1497 0xbd0256bb,
1501 0x0475fd50, 1498 0x0475fd50,
1502 0x21f550fc, 1499 0x21f550fc,
1503 0x64b607b1, 1500 0x64b607a5,
1504 0x8a11f504, 1501 0x8a11f504,
1505 0x0076bb00, 1502 0x0076bb00,
1506 0xf90465b6, 1503 0xf90465b6,
@@ -1508,7 +1505,7 @@ uint32_t nvc0_pwr_code[] = {
1508 0xbd0256bb, 1505 0xbd0256bb,
1509 0x0475fd50, 1506 0x0475fd50,
1510 0x21f550fc, 1507 0x21f550fc,
1511 0x64b60704, 1508 0x64b606f8,
1512 0x6a11f404, 1509 0x6a11f404,
1513 0xbbe05bcb, 1510 0xbbe05bcb,
1514 0x65b60076, 1511 0x65b60076,
@@ -1516,38 +1513,38 @@ uint32_t nvc0_pwr_code[] = {
1516 0x56bb0465, 1513 0x56bb0465,
1517 0xfd50bd02, 1514 0xfd50bd02,
1518 0x50fc0475, 1515 0x50fc0475,
1519 0x064921f5, 1516 0x063d21f5,
1520 0xb90464b6, 1517 0xb90464b6,
1521 0x74bd025b, 1518 0x74bd025b,
1522/* 0x092b: i2c_recv_not_rd08 */ 1519/* 0x091f: i2c_recv_not_rd08 */
1523 0xb0430ef4, 1520 0xb0430ef4,
1524 0x1bf401d6, 1521 0x1bf401d6,
1525 0x0057f03d, 1522 0x0057f03d,
1526 0x07b121f5, 1523 0x07a521f5,
1527 0xc73311f4, 1524 0xc73311f4,
1528 0x21f5e0c5, 1525 0x21f5e0c5,
1529 0x11f40756, 1526 0x11f4074a,
1530 0x0057f029, 1527 0x0057f029,
1531 0x07b121f5, 1528 0x07a521f5,
1532 0xc71f11f4, 1529 0xc71f11f4,
1533 0x21f5e0b5, 1530 0x21f5e0b5,
1534 0x11f40756, 1531 0x11f4074a,
1535 0x4921f515, 1532 0x3d21f515,
1536 0xc774bd06, 1533 0xc774bd06,
1537 0x1bf408c5, 1534 0x1bf408c5,
1538 0x0232f409, 1535 0x0232f409,
1539/* 0x096b: i2c_recv_not_wr08 */ 1536/* 0x095f: i2c_recv_not_wr08 */
1540/* 0x096b: i2c_recv_done */ 1537/* 0x095f: i2c_recv_done */
1541 0xc7030ef4, 1538 0xc7030ef4,
1542 0x21f5f8ce, 1539 0x21f5f8ce,
1543 0xe0fc0816, 1540 0xe0fc080a,
1544 0x12f4d0fc, 1541 0x12f4d0fc,
1545 0x027cb90a, 1542 0x027cb90a,
1546 0x02b921f5, 1543 0x02b921f5,
1547/* 0x0980: i2c_recv_exit */ 1544/* 0x0974: i2c_recv_exit */
1548/* 0x0982: i2c_init */ 1545/* 0x0976: i2c_init */
1549 0x00f800f8, 1546 0x00f800f8,
1550/* 0x0984: test_recv */ 1547/* 0x0978: test_recv */
1551 0x05d817f1, 1548 0x05d817f1,
1552 0xcf0614b6, 1549 0xcf0614b6,
1553 0x10b60011, 1550 0x10b60011,
@@ -1557,12 +1554,12 @@ uint32_t nvc0_pwr_code[] = {
1557 0x00e7f104, 1554 0x00e7f104,
1558 0x4fe3f1d9, 1555 0x4fe3f1d9,
1559 0xf521f513, 1556 0xf521f513,
1560/* 0x09ab: test_init */ 1557/* 0x099f: test_init */
1561 0xf100f801, 1558 0xf100f801,
1562 0xf50800e7, 1559 0xf50800e7,
1563 0xf801f521, 1560 0xf801f521,
1564/* 0x09b5: idle_recv */ 1561/* 0x09a9: idle_recv */
1565/* 0x09b7: idle */ 1562/* 0x09ab: idle */
1566 0xf400f800, 1563 0xf400f800,
1567 0x17f10031, 1564 0x17f10031,
1568 0x14b605d4, 1565 0x14b605d4,
@@ -1570,20 +1567,23 @@ uint32_t nvc0_pwr_code[] = {
1570 0xf10110b6, 1567 0xf10110b6,
1571 0xb605d407, 1568 0xb605d407,
1572 0x01d00604, 1569 0x01d00604,
1573/* 0x09d3: idle_loop */ 1570/* 0x09c7: idle_loop */
1574 0xf004bd00, 1571 0xf004bd00,
1575 0x32f45817, 1572 0x32f45817,
1576/* 0x09d9: idle_proc */ 1573/* 0x09cd: idle_proc */
1577/* 0x09d9: idle_proc_exec */ 1574/* 0x09cd: idle_proc_exec */
1578 0xb910f902, 1575 0xb910f902,
1579 0x21f5021e, 1576 0x21f5021e,
1580 0x10fc02c2, 1577 0x10fc02c2,
1581 0xf40911f4, 1578 0xf40911f4,
1582 0x0ef40231, 1579 0x0ef40231,
1583/* 0x09ed: idle_proc_next */ 1580/* 0x09e1: idle_proc_next */
1584 0x5810b6ef, 1581 0x5810b6ef,
1585 0xf4061fb8, 1582 0xf4061fb8,
1586 0x02f4e61b, 1583 0x02f4e61b,
1587 0x0028f4dd, 1584 0x0028f4dd,
1588 0x00bb0ef4, 1585 0x00bb0ef4,
1586 0x00000000,
1587 0x00000000,
1588 0x00000000,
1589}; 1589};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index cd9ff1a73284..8d369b3faaba 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvd0_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x000004c4, 49 0x000004b8,
50 0x000004b6, 50 0x000004aa,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvd0_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x000004c8, 71 0x000004bc,
72 0x000004c6, 72 0x000004ba,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvd0_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x000008e3, 93 0x000008d7,
94 0x00000786, 94 0x0000077a,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvd0_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x00000906, 115 0x000008fa,
116 0x000008e5, 116 0x000008d9,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvd0_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000912, 137 0x00000906,
138 0x00000910, 138 0x00000904,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -239,10 +239,10 @@ uint32_t nvd0_pwr_data[] = {
239 0x00000430, 239 0x00000430,
240 0x00040003, 240 0x00040003,
241 0x00000000, 241 0x00000000,
242 0x00000458, 242 0x0000044c,
243 0x00010004, 243 0x00010004,
244 0x00000000, 244 0x00000000,
245 0x00000472, 245 0x00000466,
246/* 0x03ac: memx_func_tail */ 246/* 0x03ac: memx_func_tail */
247/* 0x03ac: memx_data_head */ 247/* 0x03ac: memx_data_head */
248 0x00000000, 248 0x00000000,
@@ -1100,26 +1100,23 @@ uint32_t nvd0_pwr_code[] = {
1100 0xf960f908, 1100 0xf960f908,
1101 0xfcd0fc50, 1101 0xfcd0fc50,
1102 0x3321f4e0, 1102 0x3321f4e0,
1103 0x140003f1,
1104 0x800506fd,
1105 0x04bd0005,
1106 0xf40242b6, 1103 0xf40242b6,
1107 0x00f8dd1b, 1104 0x00f8e91b,
1108/* 0x0458: memx_func_wait */ 1105/* 0x044c: memx_func_wait */
1109 0xcf2c87f0, 1106 0xcf2c87f0,
1110 0x1e980088, 1107 0x1e980088,
1111 0x011d9800, 1108 0x011d9800,
1112 0x98021c98, 1109 0x98021c98,
1113 0x10b6031b, 1110 0x10b6031b,
1114 0x7e21f410, 1111 0x7e21f410,
1115/* 0x0472: memx_func_delay */ 1112/* 0x0466: memx_func_delay */
1116 0x1e9800f8, 1113 0x1e9800f8,
1117 0x0410b600, 1114 0x0410b600,
1118 0xf86721f4, 1115 0xf86721f4,
1119/* 0x047d: memx_exec */ 1116/* 0x0471: memx_exec */
1120 0xf9e0f900, 1117 0xf9e0f900,
1121 0x02c1b9d0, 1118 0x02c1b9d0,
1122/* 0x0487: memx_exec_next */ 1119/* 0x047b: memx_exec_next */
1123 0x9802b2b9, 1120 0x9802b2b9,
1124 0x10b60013, 1121 0x10b60013,
1125 0x10349504, 1122 0x10349504,
@@ -1129,107 +1126,107 @@ uint32_t nvd0_pwr_code[] = {
1129 0xd0fcec1e, 1126 0xd0fcec1e,
1130 0x21f5e0fc, 1127 0x21f5e0fc,
1131 0x00f8026b, 1128 0x00f8026b,
1132/* 0x04a8: memx_info */ 1129/* 0x049c: memx_info */
1133 0x03acc7f1, 1130 0x03acc7f1,
1134 0x0800b7f1, 1131 0x0800b7f1,
1135 0x026b21f5, 1132 0x026b21f5,
1136/* 0x04b6: memx_recv */ 1133/* 0x04aa: memx_recv */
1137 0xd6b000f8, 1134 0xd6b000f8,
1138 0xc40bf401, 1135 0xc40bf401,
1139 0xf400d6b0, 1136 0xf400d6b0,
1140 0x00f8e90b, 1137 0x00f8e90b,
1141/* 0x04c4: memx_init */ 1138/* 0x04b8: memx_init */
1142/* 0x04c6: perf_recv */ 1139/* 0x04ba: perf_recv */
1143 0x00f800f8, 1140 0x00f800f8,
1144/* 0x04c8: perf_init */ 1141/* 0x04bc: perf_init */
1145/* 0x04ca: i2c_drive_scl */ 1142/* 0x04be: i2c_drive_scl */
1146 0x36b000f8, 1143 0x36b000f8,
1147 0x0e0bf400, 1144 0x0e0bf400,
1148 0x07e007f1, 1145 0x07e007f1,
1149 0xbd0001d0, 1146 0xbd0001d0,
1150/* 0x04db: i2c_drive_scl_lo */ 1147/* 0x04cf: i2c_drive_scl_lo */
1151 0xf100f804, 1148 0xf100f804,
1152 0xd007e407, 1149 0xd007e407,
1153 0x04bd0001, 1150 0x04bd0001,
1154/* 0x04e6: i2c_drive_sda */ 1151/* 0x04da: i2c_drive_sda */
1155 0x36b000f8, 1152 0x36b000f8,
1156 0x0e0bf400, 1153 0x0e0bf400,
1157 0x07e007f1, 1154 0x07e007f1,
1158 0xbd0002d0, 1155 0xbd0002d0,
1159/* 0x04f7: i2c_drive_sda_lo */ 1156/* 0x04eb: i2c_drive_sda_lo */
1160 0xf100f804, 1157 0xf100f804,
1161 0xd007e407, 1158 0xd007e407,
1162 0x04bd0002, 1159 0x04bd0002,
1163/* 0x0502: i2c_sense_scl */ 1160/* 0x04f6: i2c_sense_scl */
1164 0x32f400f8, 1161 0x32f400f8,
1165 0xc437f101, 1162 0xc437f101,
1166 0x0033cf07, 1163 0x0033cf07,
1167 0xf40431fd, 1164 0xf40431fd,
1168 0x31f4060b, 1165 0x31f4060b,
1169/* 0x0515: i2c_sense_scl_done */ 1166/* 0x0509: i2c_sense_scl_done */
1170/* 0x0517: i2c_sense_sda */ 1167/* 0x050b: i2c_sense_sda */
1171 0xf400f801, 1168 0xf400f801,
1172 0x37f10132, 1169 0x37f10132,
1173 0x33cf07c4, 1170 0x33cf07c4,
1174 0x0432fd00, 1171 0x0432fd00,
1175 0xf4060bf4, 1172 0xf4060bf4,
1176/* 0x052a: i2c_sense_sda_done */ 1173/* 0x051e: i2c_sense_sda_done */
1177 0x00f80131, 1174 0x00f80131,
1178/* 0x052c: i2c_raise_scl */ 1175/* 0x0520: i2c_raise_scl */
1179 0x47f140f9, 1176 0x47f140f9,
1180 0x37f00898, 1177 0x37f00898,
1181 0xca21f501, 1178 0xbe21f501,
1182/* 0x0539: i2c_raise_scl_wait */ 1179/* 0x052d: i2c_raise_scl_wait */
1183 0xe8e7f104, 1180 0xe8e7f104,
1184 0x6721f403, 1181 0x6721f403,
1185 0x050221f5, 1182 0x04f621f5,
1186 0xb60901f4, 1183 0xb60901f4,
1187 0x1bf40142, 1184 0x1bf40142,
1188/* 0x054d: i2c_raise_scl_done */ 1185/* 0x0541: i2c_raise_scl_done */
1189 0xf840fcef, 1186 0xf840fcef,
1190/* 0x0551: i2c_start */ 1187/* 0x0545: i2c_start */
1191 0x0221f500, 1188 0xf621f500,
1192 0x0d11f405, 1189 0x0d11f404,
1193 0x051721f5, 1190 0x050b21f5,
1194 0xf40611f4, 1191 0xf40611f4,
1195/* 0x0562: i2c_start_rep */ 1192/* 0x0556: i2c_start_rep */
1196 0x37f0300e, 1193 0x37f0300e,
1197 0xca21f500, 1194 0xbe21f500,
1198 0x0137f004, 1195 0x0137f004,
1199 0x04e621f5, 1196 0x04da21f5,
1200 0xb60076bb, 1197 0xb60076bb,
1201 0x50f90465, 1198 0x50f90465,
1202 0xbb046594, 1199 0xbb046594,
1203 0x50bd0256, 1200 0x50bd0256,
1204 0xfc0475fd, 1201 0xfc0475fd,
1205 0x2c21f550, 1202 0x2021f550,
1206 0x0464b605, 1203 0x0464b605,
1207/* 0x058f: i2c_start_send */ 1204/* 0x0583: i2c_start_send */
1208 0xf01f11f4, 1205 0xf01f11f4,
1209 0x21f50037, 1206 0x21f50037,
1210 0xe7f104e6, 1207 0xe7f104da,
1211 0x21f41388, 1208 0x21f41388,
1212 0x0037f067, 1209 0x0037f067,
1213 0x04ca21f5, 1210 0x04be21f5,
1214 0x1388e7f1, 1211 0x1388e7f1,
1215/* 0x05ab: i2c_start_out */ 1212/* 0x059f: i2c_start_out */
1216 0xf86721f4, 1213 0xf86721f4,
1217/* 0x05ad: i2c_stop */ 1214/* 0x05a1: i2c_stop */
1218 0x0037f000, 1215 0x0037f000,
1219 0x04ca21f5, 1216 0x04be21f5,
1220 0xf50037f0, 1217 0xf50037f0,
1221 0xf104e621, 1218 0xf104da21,
1222 0xf403e8e7, 1219 0xf403e8e7,
1223 0x37f06721, 1220 0x37f06721,
1224 0xca21f501, 1221 0xbe21f501,
1225 0x88e7f104, 1222 0x88e7f104,
1226 0x6721f413, 1223 0x6721f413,
1227 0xf50137f0, 1224 0xf50137f0,
1228 0xf104e621, 1225 0xf104da21,
1229 0xf41388e7, 1226 0xf41388e7,
1230 0x00f86721, 1227 0x00f86721,
1231/* 0x05e0: i2c_bitw */ 1228/* 0x05d4: i2c_bitw */
1232 0x04e621f5, 1229 0x04da21f5,
1233 0x03e8e7f1, 1230 0x03e8e7f1,
1234 0xbb6721f4, 1231 0xbb6721f4,
1235 0x65b60076, 1232 0x65b60076,
@@ -1237,18 +1234,18 @@ uint32_t nvd0_pwr_code[] = {
1237 0x56bb0465, 1234 0x56bb0465,
1238 0xfd50bd02, 1235 0xfd50bd02,
1239 0x50fc0475, 1236 0x50fc0475,
1240 0x052c21f5, 1237 0x052021f5,
1241 0xf40464b6, 1238 0xf40464b6,
1242 0xe7f11811, 1239 0xe7f11811,
1243 0x21f41388, 1240 0x21f41388,
1244 0x0037f067, 1241 0x0037f067,
1245 0x04ca21f5, 1242 0x04be21f5,
1246 0x1388e7f1, 1243 0x1388e7f1,
1247/* 0x061f: i2c_bitw_out */ 1244/* 0x0613: i2c_bitw_out */
1248 0xf86721f4, 1245 0xf86721f4,
1249/* 0x0621: i2c_bitr */ 1246/* 0x0615: i2c_bitr */
1250 0x0137f000, 1247 0x0137f000,
1251 0x04e621f5, 1248 0x04da21f5,
1252 0x03e8e7f1, 1249 0x03e8e7f1,
1253 0xbb6721f4, 1250 0xbb6721f4,
1254 0x65b60076, 1251 0x65b60076,
@@ -1256,19 +1253,19 @@ uint32_t nvd0_pwr_code[] = {
1256 0x56bb0465, 1253 0x56bb0465,
1257 0xfd50bd02, 1254 0xfd50bd02,
1258 0x50fc0475, 1255 0x50fc0475,
1259 0x052c21f5, 1256 0x052021f5,
1260 0xf40464b6, 1257 0xf40464b6,
1261 0x21f51b11, 1258 0x21f51b11,
1262 0x37f00517, 1259 0x37f0050b,
1263 0xca21f500, 1260 0xbe21f500,
1264 0x88e7f104, 1261 0x88e7f104,
1265 0x6721f413, 1262 0x6721f413,
1266 0xf4013cf0, 1263 0xf4013cf0,
1267/* 0x0666: i2c_bitr_done */ 1264/* 0x065a: i2c_bitr_done */
1268 0x00f80131, 1265 0x00f80131,
1269/* 0x0668: i2c_get_byte */ 1266/* 0x065c: i2c_get_byte */
1270 0xf00057f0, 1267 0xf00057f0,
1271/* 0x066e: i2c_get_byte_next */ 1268/* 0x0662: i2c_get_byte_next */
1272 0x54b60847, 1269 0x54b60847,
1273 0x0076bb01, 1270 0x0076bb01,
1274 0xf90465b6, 1271 0xf90465b6,
@@ -1276,7 +1273,7 @@ uint32_t nvd0_pwr_code[] = {
1276 0xbd0256bb, 1273 0xbd0256bb,
1277 0x0475fd50, 1274 0x0475fd50,
1278 0x21f550fc, 1275 0x21f550fc,
1279 0x64b60621, 1276 0x64b60615,
1280 0x2b11f404, 1277 0x2b11f404,
1281 0xb60553fd, 1278 0xb60553fd,
1282 0x1bf40142, 1279 0x1bf40142,
@@ -1286,12 +1283,12 @@ uint32_t nvd0_pwr_code[] = {
1286 0xbb046594, 1283 0xbb046594,
1287 0x50bd0256, 1284 0x50bd0256,
1288 0xfc0475fd, 1285 0xfc0475fd,
1289 0xe021f550, 1286 0xd421f550,
1290 0x0464b605, 1287 0x0464b605,
1291/* 0x06b8: i2c_get_byte_done */ 1288/* 0x06ac: i2c_get_byte_done */
1292/* 0x06ba: i2c_put_byte */ 1289/* 0x06ae: i2c_put_byte */
1293 0x47f000f8, 1290 0x47f000f8,
1294/* 0x06bd: i2c_put_byte_next */ 1291/* 0x06b1: i2c_put_byte_next */
1295 0x0142b608, 1292 0x0142b608,
1296 0xbb3854ff, 1293 0xbb3854ff,
1297 0x65b60076, 1294 0x65b60076,
@@ -1299,7 +1296,7 @@ uint32_t nvd0_pwr_code[] = {
1299 0x56bb0465, 1296 0x56bb0465,
1300 0xfd50bd02, 1297 0xfd50bd02,
1301 0x50fc0475, 1298 0x50fc0475,
1302 0x05e021f5, 1299 0x05d421f5,
1303 0xf40464b6, 1300 0xf40464b6,
1304 0x46b03411, 1301 0x46b03411,
1305 0xd81bf400, 1302 0xd81bf400,
@@ -1308,21 +1305,21 @@ uint32_t nvd0_pwr_code[] = {
1308 0xbb046594, 1305 0xbb046594,
1309 0x50bd0256, 1306 0x50bd0256,
1310 0xfc0475fd, 1307 0xfc0475fd,
1311 0x2121f550, 1308 0x1521f550,
1312 0x0464b606, 1309 0x0464b606,
1313 0xbb0f11f4, 1310 0xbb0f11f4,
1314 0x36b00076, 1311 0x36b00076,
1315 0x061bf401, 1312 0x061bf401,
1316/* 0x0713: i2c_put_byte_done */ 1313/* 0x0707: i2c_put_byte_done */
1317 0xf80132f4, 1314 0xf80132f4,
1318/* 0x0715: i2c_addr */ 1315/* 0x0709: i2c_addr */
1319 0x0076bb00, 1316 0x0076bb00,
1320 0xf90465b6, 1317 0xf90465b6,
1321 0x04659450, 1318 0x04659450,
1322 0xbd0256bb, 1319 0xbd0256bb,
1323 0x0475fd50, 1320 0x0475fd50,
1324 0x21f550fc, 1321 0x21f550fc,
1325 0x64b60551, 1322 0x64b60545,
1326 0x2911f404, 1323 0x2911f404,
1327 0x012ec3e7, 1324 0x012ec3e7,
1328 0xfd0134b6, 1325 0xfd0134b6,
@@ -1332,23 +1329,23 @@ uint32_t nvd0_pwr_code[] = {
1332 0x0256bb04, 1329 0x0256bb04,
1333 0x75fd50bd, 1330 0x75fd50bd,
1334 0xf550fc04, 1331 0xf550fc04,
1335 0xb606ba21, 1332 0xb606ae21,
1336/* 0x075a: i2c_addr_done */ 1333/* 0x074e: i2c_addr_done */
1337 0x00f80464, 1334 0x00f80464,
1338/* 0x075c: i2c_acquire_addr */ 1335/* 0x0750: i2c_acquire_addr */
1339 0xb6f8cec7, 1336 0xb6f8cec7,
1340 0xe0b705e4, 1337 0xe0b705e4,
1341 0x00f8d014, 1338 0x00f8d014,
1342/* 0x0768: i2c_acquire */ 1339/* 0x075c: i2c_acquire */
1343 0x075c21f5, 1340 0x075021f5,
1344 0xf00421f4, 1341 0xf00421f4,
1345 0x21f403d9, 1342 0x21f403d9,
1346/* 0x0777: i2c_release */ 1343/* 0x076b: i2c_release */
1347 0xf500f833, 1344 0xf500f833,
1348 0xf4075c21, 1345 0xf4075021,
1349 0xdaf00421, 1346 0xdaf00421,
1350 0x3321f403, 1347 0x3321f403,
1351/* 0x0786: i2c_recv */ 1348/* 0x077a: i2c_recv */
1352 0x32f400f8, 1349 0x32f400f8,
1353 0xf8c1c701, 1350 0xf8c1c701,
1354 0xb00214b6, 1351 0xb00214b6,
@@ -1367,7 +1364,7 @@ uint32_t nvd0_pwr_code[] = {
1367 0xbb046594, 1364 0xbb046594,
1368 0x50bd0256, 1365 0x50bd0256,
1369 0xfc0475fd, 1366 0xfc0475fd,
1370 0x6821f550, 1367 0x5c21f550,
1371 0x0464b607, 1368 0x0464b607,
1372 0xd6b0d0fc, 1369 0xd6b0d0fc,
1373 0xb31bf500, 1370 0xb31bf500,
@@ -1377,7 +1374,7 @@ uint32_t nvd0_pwr_code[] = {
1377 0xbb046594, 1374 0xbb046594,
1378 0x50bd0256, 1375 0x50bd0256,
1379 0xfc0475fd, 1376 0xfc0475fd,
1380 0x1521f550, 1377 0x0921f550,
1381 0x0464b607, 1378 0x0464b607,
1382 0x00d011f5, 1379 0x00d011f5,
1383 0xbbe0c5c7, 1380 0xbbe0c5c7,
@@ -1386,7 +1383,7 @@ uint32_t nvd0_pwr_code[] = {
1386 0x56bb0465, 1383 0x56bb0465,
1387 0xfd50bd02, 1384 0xfd50bd02,
1388 0x50fc0475, 1385 0x50fc0475,
1389 0x06ba21f5, 1386 0x06ae21f5,
1390 0xf50464b6, 1387 0xf50464b6,
1391 0xf000ad11, 1388 0xf000ad11,
1392 0x76bb0157, 1389 0x76bb0157,
@@ -1395,7 +1392,7 @@ uint32_t nvd0_pwr_code[] = {
1395 0x0256bb04, 1392 0x0256bb04,
1396 0x75fd50bd, 1393 0x75fd50bd,
1397 0xf550fc04, 1394 0xf550fc04,
1398 0xb6071521, 1395 0xb6070921,
1399 0x11f50464, 1396 0x11f50464,
1400 0x76bb008a, 1397 0x76bb008a,
1401 0x0465b600, 1398 0x0465b600,
@@ -1403,7 +1400,7 @@ uint32_t nvd0_pwr_code[] = {
1403 0x0256bb04, 1400 0x0256bb04,
1404 0x75fd50bd, 1401 0x75fd50bd,
1405 0xf550fc04, 1402 0xf550fc04,
1406 0xb6066821, 1403 0xb6065c21,
1407 0x11f40464, 1404 0x11f40464,
1408 0xe05bcb6a, 1405 0xe05bcb6a,
1409 0xb60076bb, 1406 0xb60076bb,
@@ -1411,38 +1408,38 @@ uint32_t nvd0_pwr_code[] = {
1411 0xbb046594, 1408 0xbb046594,
1412 0x50bd0256, 1409 0x50bd0256,
1413 0xfc0475fd, 1410 0xfc0475fd,
1414 0xad21f550, 1411 0xa121f550,
1415 0x0464b605, 1412 0x0464b605,
1416 0xbd025bb9, 1413 0xbd025bb9,
1417 0x430ef474, 1414 0x430ef474,
1418/* 0x088c: i2c_recv_not_rd08 */ 1415/* 0x0880: i2c_recv_not_rd08 */
1419 0xf401d6b0, 1416 0xf401d6b0,
1420 0x57f03d1b, 1417 0x57f03d1b,
1421 0x1521f500, 1418 0x0921f500,
1422 0x3311f407, 1419 0x3311f407,
1423 0xf5e0c5c7, 1420 0xf5e0c5c7,
1424 0xf406ba21, 1421 0xf406ae21,
1425 0x57f02911, 1422 0x57f02911,
1426 0x1521f500, 1423 0x0921f500,
1427 0x1f11f407, 1424 0x1f11f407,
1428 0xf5e0b5c7, 1425 0xf5e0b5c7,
1429 0xf406ba21, 1426 0xf406ae21,
1430 0x21f51511, 1427 0x21f51511,
1431 0x74bd05ad, 1428 0x74bd05a1,
1432 0xf408c5c7, 1429 0xf408c5c7,
1433 0x32f4091b, 1430 0x32f4091b,
1434 0x030ef402, 1431 0x030ef402,
1435/* 0x08cc: i2c_recv_not_wr08 */ 1432/* 0x08c0: i2c_recv_not_wr08 */
1436/* 0x08cc: i2c_recv_done */ 1433/* 0x08c0: i2c_recv_done */
1437 0xf5f8cec7, 1434 0xf5f8cec7,
1438 0xfc077721, 1435 0xfc076b21,
1439 0xf4d0fce0, 1436 0xf4d0fce0,
1440 0x7cb90a12, 1437 0x7cb90a12,
1441 0x6b21f502, 1438 0x6b21f502,
1442/* 0x08e1: i2c_recv_exit */ 1439/* 0x08d5: i2c_recv_exit */
1443/* 0x08e3: i2c_init */ 1440/* 0x08d7: i2c_init */
1444 0xf800f802, 1441 0xf800f802,
1445/* 0x08e5: test_recv */ 1442/* 0x08d9: test_recv */
1446 0xd817f100, 1443 0xd817f100,
1447 0x0011cf05, 1444 0x0011cf05,
1448 0xf10110b6, 1445 0xf10110b6,
@@ -1451,28 +1448,28 @@ uint32_t nvd0_pwr_code[] = {
1451 0xd900e7f1, 1448 0xd900e7f1,
1452 0x134fe3f1, 1449 0x134fe3f1,
1453 0x01b621f5, 1450 0x01b621f5,
1454/* 0x0906: test_init */ 1451/* 0x08fa: test_init */
1455 0xe7f100f8, 1452 0xe7f100f8,
1456 0x21f50800, 1453 0x21f50800,
1457 0x00f801b6, 1454 0x00f801b6,
1458/* 0x0910: idle_recv */ 1455/* 0x0904: idle_recv */
1459/* 0x0912: idle */ 1456/* 0x0906: idle */
1460 0x31f400f8, 1457 0x31f400f8,
1461 0xd417f100, 1458 0xd417f100,
1462 0x0011cf05, 1459 0x0011cf05,
1463 0xf10110b6, 1460 0xf10110b6,
1464 0xd005d407, 1461 0xd005d407,
1465 0x04bd0001, 1462 0x04bd0001,
1466/* 0x0928: idle_loop */ 1463/* 0x091c: idle_loop */
1467 0xf45817f0, 1464 0xf45817f0,
1468/* 0x092e: idle_proc */ 1465/* 0x0922: idle_proc */
1469/* 0x092e: idle_proc_exec */ 1466/* 0x0922: idle_proc_exec */
1470 0x10f90232, 1467 0x10f90232,
1471 0xf5021eb9, 1468 0xf5021eb9,
1472 0xfc027421, 1469 0xfc027421,
1473 0x0911f410, 1470 0x0911f410,
1474 0xf40231f4, 1471 0xf40231f4,
1475/* 0x0942: idle_proc_next */ 1472/* 0x0936: idle_proc_next */
1476 0x10b6ef0e, 1473 0x10b6ef0e,
1477 0x061fb858, 1474 0x061fb858,
1478 0xf4e61bf4, 1475 0xf4e61bf4,
@@ -1521,4 +1518,7 @@ uint32_t nvd0_pwr_code[] = {
1521 0x00000000, 1518 0x00000000,
1522 0x00000000, 1519 0x00000000,
1523 0x00000000, 1520 0x00000000,
1521 0x00000000,
1522 0x00000000,
1523 0x00000000,
1524}; 1524};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c
new file mode 100644
index 000000000000..d76612999b9f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c
@@ -0,0 +1,69 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "priv.h"
26
27#define nvd0_pwr_code gk104_pwr_code
28#define nvd0_pwr_data gk104_pwr_data
29#include "fuc/nvd0.fuc.h"
30
31static void
32gk104_pwr_pgob(struct nouveau_pwr *ppwr, bool enable)
33{
34 nv_mask(ppwr, 0x000200, 0x00001000, 0x00000000);
35 nv_rd32(ppwr, 0x000200);
36 nv_mask(ppwr, 0x000200, 0x08000000, 0x08000000);
37 msleep(50);
38
39 nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000002);
40 nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001);
41 nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000);
42
43 nv_mask(ppwr, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
44 msleep(50);
45
46 nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000000);
47 nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001);
48 nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000);
49
50 nv_mask(ppwr, 0x000200, 0x08000000, 0x00000000);
51 nv_mask(ppwr, 0x000200, 0x00001000, 0x00001000);
52 nv_rd32(ppwr, 0x000200);
53}
54
55struct nouveau_oclass *
56gk104_pwr_oclass = &(struct nvkm_pwr_impl) {
57 .base.handle = NV_SUBDEV(PWR, 0xe4),
58 .base.ofuncs = &(struct nouveau_ofuncs) {
59 .ctor = _nouveau_pwr_ctor,
60 .dtor = _nouveau_pwr_dtor,
61 .init = _nouveau_pwr_init,
62 .fini = _nouveau_pwr_fini,
63 },
64 .code.data = gk104_pwr_code,
65 .code.size = sizeof(gk104_pwr_code),
66 .data.data = gk104_pwr_data,
67 .data.size = sizeof(gk104_pwr_data),
68 .pgob = gk104_pwr_pgob,
69}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
index 03de3107d29f..def6a9ac68cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -1,8 +1,7 @@
1#ifndef __NVKM_PWR_MEMX_H__ 1#ifndef __NVKM_PWR_MEMX_H__
2#define __NVKM_PWR_MEMX_H__ 2#define __NVKM_PWR_MEMX_H__
3 3
4#include <subdev/pwr.h> 4#include "priv.h"
5#include <subdev/pwr/fuc/os.h>
6 5
7struct nouveau_memx { 6struct nouveau_memx {
8 struct nouveau_pwr *ppwr; 7 struct nouveau_pwr *ppwr;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
index 52c85414866a..04ff7c3c34e9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
@@ -22,41 +22,20 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/pwr.h> 25#include "priv.h"
26
27#include "fuc/nv108.fuc.h" 26#include "fuc/nv108.fuc.h"
28 27
29struct nv108_pwr_priv { 28struct nouveau_oclass *
30 struct nouveau_pwr base; 29nv108_pwr_oclass = &(struct nvkm_pwr_impl) {
31}; 30 .base.handle = NV_SUBDEV(PWR, 0x00),
32 31 .base.ofuncs = &(struct nouveau_ofuncs) {
33static int 32 .ctor = _nouveau_pwr_ctor,
34nv108_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nv108_pwr_priv *priv;
39 int ret;
40
41 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.code.data = nv108_pwr_code;
47 priv->base.code.size = sizeof(nv108_pwr_code);
48 priv->base.data.data = nv108_pwr_data;
49 priv->base.data.size = sizeof(nv108_pwr_data);
50 return 0;
51}
52
53struct nouveau_oclass
54nv108_pwr_oclass = {
55 .handle = NV_SUBDEV(PWR, 0x00),
56 .ofuncs = &(struct nouveau_ofuncs) {
57 .ctor = nv108_pwr_ctor,
58 .dtor = _nouveau_pwr_dtor, 33 .dtor = _nouveau_pwr_dtor,
59 .init = _nouveau_pwr_init, 34 .init = _nouveau_pwr_init,
60 .fini = _nouveau_pwr_fini, 35 .fini = _nouveau_pwr_fini,
61 }, 36 },
62}; 37 .code.data = nv108_pwr_code,
38 .code.size = sizeof(nv108_pwr_code),
39 .data.data = nv108_pwr_data,
40 .data.size = sizeof(nv108_pwr_data),
41}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
index c132b7ca9747..998d53076b8b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
@@ -22,50 +22,29 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/pwr.h> 25#include "priv.h"
26
27#include "fuc/nva3.fuc.h" 26#include "fuc/nva3.fuc.h"
28 27
29struct nva3_pwr_priv {
30 struct nouveau_pwr base;
31};
32
33static int 28static int
34nva3_pwr_init(struct nouveau_object *object) 29nva3_pwr_init(struct nouveau_object *object)
35{ 30{
36 struct nva3_pwr_priv *priv = (void *)object; 31 struct nouveau_pwr *ppwr = (void *)object;
37 nv_mask(priv, 0x022210, 0x00000001, 0x00000000); 32 nv_mask(ppwr, 0x022210, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x022210, 0x00000001, 0x00000001); 33 nv_mask(ppwr, 0x022210, 0x00000001, 0x00000001);
39 return nouveau_pwr_init(&priv->base); 34 return nouveau_pwr_init(ppwr);
40}
41
42static int
43nva3_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
44 struct nouveau_oclass *oclass, void *data, u32 size,
45 struct nouveau_object **pobject)
46{
47 struct nva3_pwr_priv *priv;
48 int ret;
49
50 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
51 *pobject = nv_object(priv);
52 if (ret)
53 return ret;
54
55 priv->base.code.data = nva3_pwr_code;
56 priv->base.code.size = sizeof(nva3_pwr_code);
57 priv->base.data.data = nva3_pwr_data;
58 priv->base.data.size = sizeof(nva3_pwr_data);
59 return 0;
60} 35}
61 36
62struct nouveau_oclass 37struct nouveau_oclass *
63nva3_pwr_oclass = { 38nva3_pwr_oclass = &(struct nvkm_pwr_impl) {
64 .handle = NV_SUBDEV(PWR, 0xa3), 39 .base.handle = NV_SUBDEV(PWR, 0xa3),
65 .ofuncs = &(struct nouveau_ofuncs) { 40 .base.ofuncs = &(struct nouveau_ofuncs) {
66 .ctor = nva3_pwr_ctor, 41 .ctor = _nouveau_pwr_ctor,
67 .dtor = _nouveau_pwr_dtor, 42 .dtor = _nouveau_pwr_dtor,
68 .init = nva3_pwr_init, 43 .init = nva3_pwr_init,
69 .fini = _nouveau_pwr_fini, 44 .fini = _nouveau_pwr_fini,
70 }, 45 },
71}; 46 .code.data = nva3_pwr_code,
47 .code.size = sizeof(nva3_pwr_code),
48 .data.data = nva3_pwr_data,
49 .data.size = sizeof(nva3_pwr_data),
50}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
index 495f6857428d..9a773e66efa4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
@@ -22,41 +22,20 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/pwr.h> 25#include "priv.h"
26
27#include "fuc/nvc0.fuc.h" 26#include "fuc/nvc0.fuc.h"
28 27
29struct nvc0_pwr_priv { 28struct nouveau_oclass *
30 struct nouveau_pwr base; 29nvc0_pwr_oclass = &(struct nvkm_pwr_impl) {
31}; 30 .base.handle = NV_SUBDEV(PWR, 0xc0),
32 31 .base.ofuncs = &(struct nouveau_ofuncs) {
33static int 32 .ctor = _nouveau_pwr_ctor,
34nvc0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nvc0_pwr_priv *priv;
39 int ret;
40
41 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.code.data = nvc0_pwr_code;
47 priv->base.code.size = sizeof(nvc0_pwr_code);
48 priv->base.data.data = nvc0_pwr_data;
49 priv->base.data.size = sizeof(nvc0_pwr_data);
50 return 0;
51}
52
53struct nouveau_oclass
54nvc0_pwr_oclass = {
55 .handle = NV_SUBDEV(PWR, 0xc0),
56 .ofuncs = &(struct nouveau_ofuncs) {
57 .ctor = nvc0_pwr_ctor,
58 .dtor = _nouveau_pwr_dtor, 33 .dtor = _nouveau_pwr_dtor,
59 .init = _nouveau_pwr_init, 34 .init = _nouveau_pwr_init,
60 .fini = _nouveau_pwr_fini, 35 .fini = _nouveau_pwr_fini,
61 }, 36 },
62}; 37 .code.data = nvc0_pwr_code,
38 .code.size = sizeof(nvc0_pwr_code),
39 .data.data = nvc0_pwr_data,
40 .data.size = sizeof(nvc0_pwr_data),
41}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
index 043aa142fe82..2b29be5d08ac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
@@ -22,41 +22,20 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/pwr.h> 25#include "priv.h"
26
27#include "fuc/nvd0.fuc.h" 26#include "fuc/nvd0.fuc.h"
28 27
29struct nvd0_pwr_priv { 28struct nouveau_oclass *
30 struct nouveau_pwr base; 29nvd0_pwr_oclass = &(struct nvkm_pwr_impl) {
31}; 30 .base.handle = NV_SUBDEV(PWR, 0xd0),
32 31 .base.ofuncs = &(struct nouveau_ofuncs) {
33static int 32 .ctor = _nouveau_pwr_ctor,
34nvd0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nvd0_pwr_priv *priv;
39 int ret;
40
41 ret = nouveau_pwr_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.code.data = nvd0_pwr_code;
47 priv->base.code.size = sizeof(nvd0_pwr_code);
48 priv->base.data.data = nvd0_pwr_data;
49 priv->base.data.size = sizeof(nvd0_pwr_data);
50 return 0;
51}
52
53struct nouveau_oclass
54nvd0_pwr_oclass = {
55 .handle = NV_SUBDEV(PWR, 0xd0),
56 .ofuncs = &(struct nouveau_ofuncs) {
57 .ctor = nvd0_pwr_ctor,
58 .dtor = _nouveau_pwr_dtor, 33 .dtor = _nouveau_pwr_dtor,
59 .init = _nouveau_pwr_init, 34 .init = _nouveau_pwr_init,
60 .fini = _nouveau_pwr_fini, 35 .fini = _nouveau_pwr_fini,
61 }, 36 },
62}; 37 .code.data = nvd0_pwr_code,
38 .code.size = sizeof(nvd0_pwr_code),
39 .data.data = nvd0_pwr_data,
40 .data.size = sizeof(nvd0_pwr_data),
41}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h
new file mode 100644
index 000000000000..3814a341db32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h
@@ -0,0 +1,44 @@
1#ifndef __NVKM_PWR_PRIV_H__
2#define __NVKM_PWR_PRIV_H__
3
4#include <subdev/pwr.h>
5#include <subdev/pwr/fuc/os.h>
6
7#define nouveau_pwr_create(p, e, o, d) \
8 nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
9#define nouveau_pwr_destroy(p) \
10 nouveau_subdev_destroy(&(p)->base)
11#define nouveau_pwr_init(p) ({ \
12 struct nouveau_pwr *_ppwr = (p); \
13 _nouveau_pwr_init(nv_object(_ppwr)); \
14})
15#define nouveau_pwr_fini(p,s) ({ \
16 struct nouveau_pwr *_ppwr = (p); \
17 _nouveau_pwr_fini(nv_object(_ppwr), (s)); \
18})
19
20int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
21 struct nouveau_oclass *, int, void **);
22
23int _nouveau_pwr_ctor(struct nouveau_object *, struct nouveau_object *,
24 struct nouveau_oclass *, void *, u32,
25 struct nouveau_object **);
26#define _nouveau_pwr_dtor _nouveau_subdev_dtor
27int _nouveau_pwr_init(struct nouveau_object *);
28int _nouveau_pwr_fini(struct nouveau_object *, bool);
29
30struct nvkm_pwr_impl {
31 struct nouveau_oclass base;
32 struct {
33 u32 *data;
34 u32 size;
35 } code;
36 struct {
37 u32 *data;
38 u32 size;
39 } data;
40
41 void (*pgob)(struct nouveau_pwr *, bool);
42};
43
44#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index 668cf964e4a9..2d0988755530 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -28,7 +28,7 @@
28#include <subdev/timer.h> 28#include <subdev/timer.h>
29#include <subdev/fb.h> 29#include <subdev/fb.h>
30#include <subdev/vm.h> 30#include <subdev/vm.h>
31#include <subdev/ltcg.h> 31#include <subdev/ltc.h>
32#include <subdev/bar.h> 32#include <subdev/bar.h>
33 33
34struct nvc0_vmmgr_priv { 34struct nvc0_vmmgr_priv {
@@ -116,12 +116,12 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
116 pte <<= 3; 116 pte <<= 3;
117 117
118 if (mem->tag) { 118 if (mem->tag) {
119 struct nouveau_ltcg *ltcg = 119 struct nouveau_ltc *ltc =
120 nouveau_ltcg(vma->vm->vmm->base.base.parent); 120 nouveau_ltc(vma->vm->vmm->base.base.parent);
121 u32 tag = mem->tag->offset + (delta >> 17); 121 u32 tag = mem->tag->offset + (delta >> 17);
122 phys |= (u64)tag << (32 + 12); 122 phys |= (u64)tag << (32 + 12);
123 next |= (u64)1 << (32 + 12); 123 next |= (u64)1 << (32 + 12);
124 ltcg->tags_clear(ltcg, tag, cnt); 124 ltc->tags_clear(ltc, tag, cnt);
125 } 125 }
126 126
127 while (cnt--) { 127 while (cnt--) {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 2a15b98b4d2b..c6361422a0b2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -198,12 +198,12 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
198 int *burst, int *lwm) 198 int *burst, int *lwm)
199{ 199{
200 struct nouveau_drm *drm = nouveau_drm(dev); 200 struct nouveau_drm *drm = nouveau_drm(dev);
201 struct nouveau_device *device = nouveau_dev(dev); 201 struct nvif_device *device = &nouveau_drm(dev)->device;
202 struct nv_fifo_info fifo_data; 202 struct nv_fifo_info fifo_data;
203 struct nv_sim_state sim_data; 203 struct nv_sim_state sim_data;
204 int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); 204 int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
205 int NVClk = nouveau_hw_get_clock(dev, PLL_CORE); 205 int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
206 uint32_t cfg1 = nv_rd32(device, NV04_PFB_CFG1); 206 uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1);
207 207
208 sim_data.pclk_khz = VClk; 208 sim_data.pclk_khz = VClk;
209 sim_data.mclk_khz = MClk; 209 sim_data.mclk_khz = MClk;
@@ -221,13 +221,13 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
221 sim_data.mem_latency = 3; 221 sim_data.mem_latency = 3;
222 sim_data.mem_page_miss = 10; 222 sim_data.mem_page_miss = 10;
223 } else { 223 } else {
224 sim_data.memory_type = nv_rd32(device, NV04_PFB_CFG0) & 0x1; 224 sim_data.memory_type = nvif_rd32(device, NV04_PFB_CFG0) & 0x1;
225 sim_data.memory_width = (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64; 225 sim_data.memory_width = (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
226 sim_data.mem_latency = cfg1 & 0xf; 226 sim_data.mem_latency = cfg1 & 0xf;
227 sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); 227 sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
228 } 228 }
229 229
230 if (nv_device(drm->device)->card_type == NV_04) 230 if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
231 nv04_calc_arb(&fifo_data, &sim_data); 231 nv04_calc_arb(&fifo_data, &sim_data);
232 else 232 else
233 nv10_calc_arb(&fifo_data, &sim_data); 233 nv10_calc_arb(&fifo_data, &sim_data);
@@ -254,7 +254,7 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
254{ 254{
255 struct nouveau_drm *drm = nouveau_drm(dev); 255 struct nouveau_drm *drm = nouveau_drm(dev);
256 256
257 if (nv_device(drm->device)->card_type < NV_20) 257 if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN)
258 nv04_update_arb(dev, vclk, bpp, burst, lwm); 258 nv04_update_arb(dev, vclk, bpp, burst, lwm);
259 else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || 259 else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
260 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { 260 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 41be3424c906..b90aa5c1f90a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -111,8 +111,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
111{ 111{
112 struct drm_device *dev = crtc->dev; 112 struct drm_device *dev = crtc->dev;
113 struct nouveau_drm *drm = nouveau_drm(dev); 113 struct nouveau_drm *drm = nouveau_drm(dev);
114 struct nouveau_bios *bios = nouveau_bios(drm->device); 114 struct nouveau_bios *bios = nvkm_bios(&drm->device);
115 struct nouveau_clock *clk = nouveau_clock(drm->device); 115 struct nouveau_clock *clk = nvkm_clock(&drm->device);
116 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 116 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
117 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; 117 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
118 struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; 118 struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
@@ -136,7 +136,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
136 * has yet been observed in allowing the use a single stage pll on all 136 * has yet been observed in allowing the use a single stage pll on all
137 * nv43 however. the behaviour of single stage use is untested on nv40 137 * nv43 however. the behaviour of single stage use is untested on nv40
138 */ 138 */
139 if (nv_device(drm->device)->chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2)) 139 if (drm->device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
140 memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); 140 memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
141 141
142 142
@@ -146,10 +146,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
146 state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; 146 state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
147 147
148 /* The blob uses this always, so let's do the same */ 148 /* The blob uses this always, so let's do the same */
149 if (nv_device(drm->device)->card_type == NV_40) 149 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
150 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; 150 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
151 /* again nv40 and some nv43 act more like nv3x as described above */ 151 /* again nv40 and some nv43 act more like nv3x as described above */
152 if (nv_device(drm->device)->chipset < 0x41) 152 if (drm->device.info.chipset < 0x41)
153 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | 153 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
154 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; 154 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
155 state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; 155 state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
@@ -275,7 +275,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
275 horizEnd = horizTotal - 2; 275 horizEnd = horizTotal - 2;
276 horizBlankEnd = horizTotal + 4; 276 horizBlankEnd = horizTotal + 4;
277#if 0 277#if 0
278 if (dev->overlayAdaptor && nv_device(drm->device)->card_type >= NV_10) 278 if (dev->overlayAdaptor && drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
279 /* This reportedly works around some video overlay bandwidth problems */ 279 /* This reportedly works around some video overlay bandwidth problems */
280 horizTotal += 2; 280 horizTotal += 2;
281#endif 281#endif
@@ -509,7 +509,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
509 regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | 509 regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
510 NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | 510 NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
511 NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; 511 NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
512 if (nv_device(drm->device)->chipset >= 0x11) 512 if (drm->device.info.chipset >= 0x11)
513 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; 513 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
514 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 514 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
515 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; 515 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
@@ -550,26 +550,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
550 * 1 << 30 on 0x60.830), for no apparent reason */ 550 * 1 << 30 on 0x60.830), for no apparent reason */
551 regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; 551 regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
552 552
553 if (nv_device(drm->device)->card_type >= NV_30) 553 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
554 regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1; 554 regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
555 555
556 regp->crtc_830 = mode->crtc_vdisplay - 3; 556 regp->crtc_830 = mode->crtc_vdisplay - 3;
557 regp->crtc_834 = mode->crtc_vdisplay - 1; 557 regp->crtc_834 = mode->crtc_vdisplay - 1;
558 558
559 if (nv_device(drm->device)->card_type == NV_40) 559 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
560 /* This is what the blob does */ 560 /* This is what the blob does */
561 regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); 561 regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
562 562
563 if (nv_device(drm->device)->card_type >= NV_30) 563 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
564 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); 564 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
565 565
566 if (nv_device(drm->device)->card_type >= NV_10) 566 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
567 regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; 567 regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
568 else 568 else
569 regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; 569 regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
570 570
571 /* Some misc regs */ 571 /* Some misc regs */
572 if (nv_device(drm->device)->card_type == NV_40) { 572 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
573 regp->CRTC[NV_CIO_CRE_85] = 0xFF; 573 regp->CRTC[NV_CIO_CRE_85] = 0xFF;
574 regp->CRTC[NV_CIO_CRE_86] = 0x1; 574 regp->CRTC[NV_CIO_CRE_86] = 0x1;
575 } 575 }
@@ -581,7 +581,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
581 581
582 /* Generic PRAMDAC regs */ 582 /* Generic PRAMDAC regs */
583 583
584 if (nv_device(drm->device)->card_type >= NV_10) 584 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
585 /* Only bit that bios and blob set. */ 585 /* Only bit that bios and blob set. */
586 regp->nv10_cursync = (1 << 25); 586 regp->nv10_cursync = (1 << 25);
587 587
@@ -590,7 +590,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
590 NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; 590 NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
591 if (crtc->primary->fb->depth == 16) 591 if (crtc->primary->fb->depth == 16)
592 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; 592 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
593 if (nv_device(drm->device)->chipset >= 0x11) 593 if (drm->device.info.chipset >= 0x11)
594 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; 594 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
595 595
596 regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ 596 regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
@@ -653,7 +653,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
653 653
654 nv_crtc_mode_set_vga(crtc, adjusted_mode); 654 nv_crtc_mode_set_vga(crtc, adjusted_mode);
655 /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ 655 /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
656 if (nv_device(drm->device)->card_type == NV_40) 656 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
657 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk); 657 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
658 nv_crtc_mode_set_regs(crtc, adjusted_mode); 658 nv_crtc_mode_set_regs(crtc, adjusted_mode);
659 nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); 659 nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
@@ -714,7 +714,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
714 714
715 /* Some more preparation. */ 715 /* Some more preparation. */
716 NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); 716 NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
717 if (nv_device(drm->device)->card_type == NV_40) { 717 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
718 uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); 718 uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
719 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); 719 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
720 } 720 }
@@ -888,7 +888,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
888 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); 888 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
889 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); 889 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
890 890
891 if (nv_device(drm->device)->card_type >= NV_20) { 891 if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
892 regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; 892 regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
893 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); 893 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
894 } 894 }
@@ -915,9 +915,9 @@ nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
915 struct drm_device *dev = drm->dev; 915 struct drm_device *dev = drm->dev;
916 916
917 if (state == ENTER_ATOMIC_MODE_SET) 917 if (state == ENTER_ATOMIC_MODE_SET)
918 nouveau_fbcon_save_disable_accel(dev); 918 nouveau_fbcon_accel_save_disable(dev);
919 else 919 else
920 nouveau_fbcon_restore_accel(dev); 920 nouveau_fbcon_accel_restore(dev);
921 921
922 return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true); 922 return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true);
923} 923}
@@ -969,7 +969,7 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
969 { 969 {
970 struct nouveau_drm *drm = nouveau_drm(dev); 970 struct nouveau_drm *drm = nouveau_drm(dev);
971 971
972 if (nv_device(drm->device)->chipset == 0x11) { 972 if (drm->device.info.chipset == 0x11) {
973 pixel = ((pixel & 0x000000ff) << 24) | 973 pixel = ((pixel & 0x000000ff) << 24) |
974 ((pixel & 0x0000ff00) << 8) | 974 ((pixel & 0x0000ff00) << 8) |
975 ((pixel & 0x00ff0000) >> 8) | 975 ((pixel & 0x00ff0000) >> 8) |
@@ -1010,7 +1010,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
1010 if (ret) 1010 if (ret)
1011 goto out; 1011 goto out;
1012 1012
1013 if (nv_device(drm->device)->chipset >= 0x11) 1013 if (drm->device.info.chipset >= 0x11)
1014 nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); 1014 nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
1015 else 1015 else
1016 nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); 1016 nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index a810303169de..4e61173c3353 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
55 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); 55 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
56 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); 56 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
57 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 57 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
58 if (nv_device(drm->device)->card_type == NV_40) 58 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
59 nv_fix_nv40_hw_cursor(dev, nv_crtc->index); 59 nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
60} 60}
61 61
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index a96dda48718e..2d8056cde996 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -65,8 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
65 65
66static int sample_load_twice(struct drm_device *dev, bool sense[2]) 66static int sample_load_twice(struct drm_device *dev, bool sense[2])
67{ 67{
68 struct nouveau_device *device = nouveau_dev(dev); 68 struct nvif_device *device = &nouveau_drm(dev)->device;
69 struct nouveau_timer *ptimer = nouveau_timer(device); 69 struct nouveau_timer *ptimer = nvkm_timer(device);
70 int i; 70 int i;
71 71
72 for (i = 0; i < 2; i++) { 72 for (i = 0; i < 2; i++) {
@@ -95,15 +95,15 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
95 95
96 udelay(100); 96 udelay(100);
97 /* when level triggers, sense is _LO_ */ 97 /* when level triggers, sense is _LO_ */
98 sense_a = nv_rd08(device, NV_PRMCIO_INP0) & 0x10; 98 sense_a = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
99 99
100 /* take another reading until it agrees with sense_a... */ 100 /* take another reading until it agrees with sense_a... */
101 do { 101 do {
102 udelay(100); 102 udelay(100);
103 sense_b = nv_rd08(device, NV_PRMCIO_INP0) & 0x10; 103 sense_b = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
104 if (sense_a != sense_b) { 104 if (sense_a != sense_b) {
105 sense_b_prime = 105 sense_b_prime =
106 nv_rd08(device, NV_PRMCIO_INP0) & 0x10; 106 nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
107 if (sense_b == sense_b_prime) { 107 if (sense_b == sense_b_prime) {
108 /* ... unless two consecutive subsequent 108 /* ... unless two consecutive subsequent
109 * samples agree; sense_a is replaced */ 109 * samples agree; sense_a is replaced */
@@ -128,7 +128,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
128 struct drm_connector *connector) 128 struct drm_connector *connector)
129{ 129{
130 struct drm_device *dev = encoder->dev; 130 struct drm_device *dev = encoder->dev;
131 struct nouveau_device *device = nouveau_dev(dev); 131 struct nvif_device *device = &nouveau_drm(dev)->device;
132 struct nouveau_drm *drm = nouveau_drm(dev); 132 struct nouveau_drm *drm = nouveau_drm(dev);
133 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; 133 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
134 uint8_t saved_palette0[3], saved_palette_mask; 134 uint8_t saved_palette0[3], saved_palette_mask;
@@ -164,11 +164,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
164 saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX); 164 saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
165 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0); 165 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
166 166
167 nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0); 167 nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
168 for (i = 0; i < 3; i++) 168 for (i = 0; i < 3; i++)
169 saved_palette0[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA); 169 saved_palette0[i] = nvif_rd08(device, NV_PRMDIO_PALETTE_DATA);
170 saved_palette_mask = nv_rd08(device, NV_PRMDIO_PIXEL_MASK); 170 saved_palette_mask = nvif_rd08(device, NV_PRMDIO_PIXEL_MASK);
171 nv_wr08(device, NV_PRMDIO_PIXEL_MASK, 0); 171 nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, 0);
172 172
173 saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL); 173 saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
174 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, 174 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
@@ -181,11 +181,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
181 do { 181 do {
182 bool sense_pair[2]; 182 bool sense_pair[2];
183 183
184 nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); 184 nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
185 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); 185 nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
186 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); 186 nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
187 /* testing blue won't find monochrome monitors. I don't care */ 187 /* testing blue won't find monochrome monitors. I don't care */
188 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, blue); 188 nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, blue);
189 189
190 i = 0; 190 i = 0;
191 /* take sample pairs until both samples in the pair agree */ 191 /* take sample pairs until both samples in the pair agree */
@@ -208,11 +208,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
208 } while (++blue < 0x18 && sense); 208 } while (++blue < 0x18 && sense);
209 209
210out: 210out:
211 nv_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask); 211 nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
212 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl); 212 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
213 nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); 213 nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
214 for (i = 0; i < 3; i++) 214 for (i = 0; i < 3; i++)
215 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]); 215 nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
216 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl); 216 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
217 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); 217 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
218 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); 218 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
@@ -231,8 +231,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
231{ 231{
232 struct drm_device *dev = encoder->dev; 232 struct drm_device *dev = encoder->dev;
233 struct nouveau_drm *drm = nouveau_drm(dev); 233 struct nouveau_drm *drm = nouveau_drm(dev);
234 struct nouveau_device *device = nouveau_dev(dev); 234 struct nvif_device *device = &nouveau_drm(dev)->device;
235 struct nouveau_gpio *gpio = nouveau_gpio(device); 235 struct nouveau_gpio *gpio = nvkm_gpio(device);
236 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; 236 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
237 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); 237 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
238 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, 238 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -256,12 +256,12 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
256 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 256 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
257 saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF); 257 saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
258 258
259 saved_powerctrl_2 = nv_rd32(device, NV_PBUS_POWERCTRL_2); 259 saved_powerctrl_2 = nvif_rd32(device, NV_PBUS_POWERCTRL_2);
260 260
261 nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff); 261 nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
262 if (regoffset == 0x68) { 262 if (regoffset == 0x68) {
263 saved_powerctrl_4 = nv_rd32(device, NV_PBUS_POWERCTRL_4); 263 saved_powerctrl_4 = nvif_rd32(device, NV_PBUS_POWERCTRL_4);
264 nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); 264 nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
265 } 265 }
266 266
267 if (gpio) { 267 if (gpio) {
@@ -283,7 +283,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
283 /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ 283 /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
284 routput = (saved_routput & 0xfffffece) | head << 8; 284 routput = (saved_routput & 0xfffffece) | head << 8;
285 285
286 if (nv_device(drm->device)->card_type >= NV_40) { 286 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) {
287 if (dcb->type == DCB_OUTPUT_TV) 287 if (dcb->type == DCB_OUTPUT_TV)
288 routput |= 0x1a << 16; 288 routput |= 0x1a << 16;
289 else 289 else
@@ -316,8 +316,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
316 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput); 316 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
317 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl); 317 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
318 if (regoffset == 0x68) 318 if (regoffset == 0x68)
319 nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); 319 nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
320 nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); 320 nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
321 321
322 if (gpio) { 322 if (gpio) {
323 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1); 323 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
@@ -398,7 +398,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
398 } 398 }
399 399
400 /* This could use refinement for flatpanels, but it should work this way */ 400 /* This could use refinement for flatpanels, but it should work this way */
401 if (nv_device(drm->device)->chipset < 0x44) 401 if (drm->device.info.chipset < 0x44)
402 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); 402 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
403 else 403 else
404 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); 404 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index e57babb206d3..42a5435259f7 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
281 struct drm_display_mode *adjusted_mode) 281 struct drm_display_mode *adjusted_mode)
282{ 282{
283 struct drm_device *dev = encoder->dev; 283 struct drm_device *dev = encoder->dev;
284 struct nouveau_device *device = nouveau_dev(dev); 284 struct nvif_device *device = &nouveau_drm(dev)->device;
285 struct nouveau_drm *drm = nouveau_drm(dev); 285 struct nouveau_drm *drm = nouveau_drm(dev);
286 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 286 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
287 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; 287 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -335,7 +335,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
335 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE; 335 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
336 else /* gpu needs to scale */ 336 else /* gpu needs to scale */
337 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE; 337 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
338 if (nv_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT) 338 if (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
339 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; 339 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
340 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && 340 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
341 output_mode->clock > 165000) 341 output_mode->clock > 165000)
@@ -416,7 +416,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
416 if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || 416 if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
417 (nv_connector->dithering_mode == DITHERING_MODE_AUTO && 417 (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
418 encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) { 418 encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) {
419 if (nv_device(drm->device)->chipset == 0x11) 419 if (drm->device.info.chipset == 0x11)
420 regp->dither = savep->dither | 0x00010000; 420 regp->dither = savep->dither | 0x00010000;
421 else { 421 else {
422 int i; 422 int i;
@@ -427,7 +427,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
427 } 427 }
428 } 428 }
429 } else { 429 } else {
430 if (nv_device(drm->device)->chipset != 0x11) { 430 if (drm->device.info.chipset != 0x11) {
431 /* reset them */ 431 /* reset them */
432 int i; 432 int i;
433 for (i = 0; i < 3; i++) { 433 for (i = 0; i < 3; i++) {
@@ -463,7 +463,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
463 NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); 463 NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
464 464
465 /* This could use refinement for flatpanels, but it should work this way */ 465 /* This could use refinement for flatpanels, but it should work this way */
466 if (nv_device(drm->device)->chipset < 0x44) 466 if (drm->device.info.chipset < 0x44)
467 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); 467 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
468 else 468 else
469 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); 469 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -485,7 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
485{ 485{
486#ifdef __powerpc__ 486#ifdef __powerpc__
487 struct drm_device *dev = encoder->dev; 487 struct drm_device *dev = encoder->dev;
488 struct nouveau_device *device = nouveau_dev(dev); 488 struct nvif_device *device = &nouveau_drm(dev)->device;
489 489
490 /* BIOS scripts usually take care of the backlight, thanks 490 /* BIOS scripts usually take care of the backlight, thanks
491 * Apple for your consistency. 491 * Apple for your consistency.
@@ -623,7 +623,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
623 struct drm_device *dev = encoder->dev; 623 struct drm_device *dev = encoder->dev;
624 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; 624 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
625 struct nouveau_drm *drm = nouveau_drm(dev); 625 struct nouveau_drm *drm = nouveau_drm(dev);
626 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 626 struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
627 struct nouveau_i2c_port *port = i2c->find(i2c, 2); 627 struct nouveau_i2c_port *port = i2c->find(i2c, 2);
628 struct nouveau_i2c_board_info info[] = { 628 struct nouveau_i2c_board_info info[] = {
629 { 629 {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4342fdaee707..3d0afa1c6cff 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -22,9 +22,6 @@
22 * Author: Ben Skeggs 22 * Author: Ben Skeggs
23 */ 23 */
24 24
25#include <core/object.h>
26#include <core/class.h>
27
28#include <drm/drmP.h> 25#include <drm/drmP.h>
29#include <drm/drm_crtc_helper.h> 26#include <drm/drm_crtc_helper.h>
30 27
@@ -34,8 +31,6 @@
34#include "nouveau_encoder.h" 31#include "nouveau_encoder.h"
35#include "nouveau_connector.h" 32#include "nouveau_connector.h"
36 33
37#include <subdev/i2c.h>
38
39int 34int
40nv04_display_early_init(struct drm_device *dev) 35nv04_display_early_init(struct drm_device *dev)
41{ 36{
@@ -58,7 +53,7 @@ int
58nv04_display_create(struct drm_device *dev) 53nv04_display_create(struct drm_device *dev)
59{ 54{
60 struct nouveau_drm *drm = nouveau_drm(dev); 55 struct nouveau_drm *drm = nouveau_drm(dev);
61 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 56 struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
62 struct dcb_table *dcb = &drm->vbios.dcb; 57 struct dcb_table *dcb = &drm->vbios.dcb;
63 struct drm_connector *connector, *ct; 58 struct drm_connector *connector, *ct;
64 struct drm_encoder *encoder; 59 struct drm_encoder *encoder;
@@ -70,6 +65,8 @@ nv04_display_create(struct drm_device *dev)
70 if (!disp) 65 if (!disp)
71 return -ENOMEM; 66 return -ENOMEM;
72 67
68 nvif_object_map(nvif_object(&drm->device));
69
73 nouveau_display(dev)->priv = disp; 70 nouveau_display(dev)->priv = disp;
74 nouveau_display(dev)->dtor = nv04_display_destroy; 71 nouveau_display(dev)->dtor = nv04_display_destroy;
75 nouveau_display(dev)->init = nv04_display_init; 72 nouveau_display(dev)->init = nv04_display_init;
@@ -144,6 +141,7 @@ void
144nv04_display_destroy(struct drm_device *dev) 141nv04_display_destroy(struct drm_device *dev)
145{ 142{
146 struct nv04_display *disp = nv04_display(dev); 143 struct nv04_display *disp = nv04_display(dev);
144 struct nouveau_drm *drm = nouveau_drm(dev);
147 struct drm_encoder *encoder; 145 struct drm_encoder *encoder;
148 struct drm_crtc *crtc; 146 struct drm_crtc *crtc;
149 147
@@ -170,6 +168,8 @@ nv04_display_destroy(struct drm_device *dev)
170 168
171 nouveau_display(dev)->priv = NULL; 169 nouveau_display(dev)->priv = NULL;
172 kfree(disp); 170 kfree(disp);
171
172 nvif_object_unmap(nvif_object(&drm->device));
173} 173}
174 174
175int 175int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 4245fc3dab70..17b899d9aba3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -131,7 +131,7 @@ nv_two_heads(struct drm_device *dev)
131 struct nouveau_drm *drm = nouveau_drm(dev); 131 struct nouveau_drm *drm = nouveau_drm(dev);
132 const int impl = dev->pdev->device & 0x0ff0; 132 const int impl = dev->pdev->device & 0x0ff0;
133 133
134 if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 && 134 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
135 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) 135 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
136 return true; 136 return true;
137 137
@@ -150,7 +150,7 @@ nv_two_reg_pll(struct drm_device *dev)
150 struct nouveau_drm *drm = nouveau_drm(dev); 150 struct nouveau_drm *drm = nouveau_drm(dev);
151 const int impl = dev->pdev->device & 0x0ff0; 151 const int impl = dev->pdev->device & 0x0ff0;
152 152
153 if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40) 153 if (impl == 0x0310 || impl == 0x0340 || drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE)
154 return true; 154 return true;
155 return false; 155 return false;
156} 156}
@@ -171,8 +171,8 @@ static inline void
171nouveau_bios_run_init_table(struct drm_device *dev, u16 table, 171nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
172 struct dcb_output *outp, int crtc) 172 struct dcb_output *outp, int crtc)
173{ 173{
174 struct nouveau_device *device = nouveau_dev(dev); 174 struct nouveau_drm *drm = nouveau_drm(dev);
175 struct nouveau_bios *bios = nouveau_bios(device); 175 struct nouveau_bios *bios = nvkm_bios(&drm->device);
176 struct nvbios_init init = { 176 struct nvbios_init init = {
177 .subdev = nv_subdev(bios), 177 .subdev = nv_subdev(bios),
178 .bios = bios, 178 .bios = bios,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index aca76af115b3..3d4c19300768 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -27,9 +27,6 @@
27#include "hw.h" 27#include "hw.h"
28 28
29#include <subdev/bios/pll.h> 29#include <subdev/bios/pll.h>
30#include <subdev/fb.h>
31#include <subdev/clock.h>
32#include <subdev/timer.h>
33 30
34#define CHIPSET_NFORCE 0x01a0 31#define CHIPSET_NFORCE 0x01a0
35#define CHIPSET_NFORCE2 0x01f0 32#define CHIPSET_NFORCE2 0x01f0
@@ -92,7 +89,7 @@ NVSetOwner(struct drm_device *dev, int owner)
92 if (owner == 1) 89 if (owner == 1)
93 owner *= 3; 90 owner *= 3;
94 91
95 if (nv_device(drm->device)->chipset == 0x11) { 92 if (drm->device.info.chipset == 0x11) {
96 /* This might seem stupid, but the blob does it and 93 /* This might seem stupid, but the blob does it and
97 * omitting it often locks the system up. 94 * omitting it often locks the system up.
98 */ 95 */
@@ -103,7 +100,7 @@ NVSetOwner(struct drm_device *dev, int owner)
103 /* CR44 is always changed on CRTC0 */ 100 /* CR44 is always changed on CRTC0 */
104 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); 101 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
105 102
106 if (nv_device(drm->device)->chipset == 0x11) { /* set me harder */ 103 if (drm->device.info.chipset == 0x11) { /* set me harder */
107 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); 104 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
108 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); 105 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
109 } 106 }
@@ -152,7 +149,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
152 pllvals->NM1 = pll1 & 0xffff; 149 pllvals->NM1 = pll1 & 0xffff;
153 if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) 150 if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
154 pllvals->NM2 = pll2 & 0xffff; 151 pllvals->NM2 = pll2 & 0xffff;
155 else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) { 152 else if (drm->device.info.chipset == 0x30 || drm->device.info.chipset == 0x35) {
156 pllvals->M1 &= 0xf; /* only 4 bits */ 153 pllvals->M1 &= 0xf; /* only 4 bits */
157 if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { 154 if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
158 pllvals->M2 = (pll1 >> 4) & 0x7; 155 pllvals->M2 = (pll1 >> 4) & 0x7;
@@ -168,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
168 struct nouveau_pll_vals *pllvals) 165 struct nouveau_pll_vals *pllvals)
169{ 166{
170 struct nouveau_drm *drm = nouveau_drm(dev); 167 struct nouveau_drm *drm = nouveau_drm(dev);
171 struct nouveau_device *device = nv_device(drm->device); 168 struct nvif_device *device = &drm->device;
172 struct nouveau_bios *bios = nouveau_bios(device); 169 struct nouveau_bios *bios = nvkm_bios(device);
173 uint32_t reg1, pll1, pll2 = 0; 170 uint32_t reg1, pll1, pll2 = 0;
174 struct nvbios_pll pll_lim; 171 struct nvbios_pll pll_lim;
175 int ret; 172 int ret;
@@ -178,16 +175,16 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
178 if (ret || !(reg1 = pll_lim.reg)) 175 if (ret || !(reg1 = pll_lim.reg))
179 return -ENOENT; 176 return -ENOENT;
180 177
181 pll1 = nv_rd32(device, reg1); 178 pll1 = nvif_rd32(device, reg1);
182 if (reg1 <= 0x405c) 179 if (reg1 <= 0x405c)
183 pll2 = nv_rd32(device, reg1 + 4); 180 pll2 = nvif_rd32(device, reg1 + 4);
184 else if (nv_two_reg_pll(dev)) { 181 else if (nv_two_reg_pll(dev)) {
185 uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70); 182 uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
186 183
187 pll2 = nv_rd32(device, reg2); 184 pll2 = nvif_rd32(device, reg2);
188 } 185 }
189 186
190 if (nv_device(drm->device)->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { 187 if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
191 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); 188 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
192 189
193 /* check whether vpll has been forced into single stage mode */ 190 /* check whether vpll has been forced into single stage mode */
@@ -255,9 +252,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
255 */ 252 */
256 253
257 struct nouveau_drm *drm = nouveau_drm(dev); 254 struct nouveau_drm *drm = nouveau_drm(dev);
258 struct nouveau_device *device = nv_device(drm->device); 255 struct nvif_device *device = &drm->device;
259 struct nouveau_clock *clk = nouveau_clock(device); 256 struct nouveau_clock *clk = nvkm_clock(device);
260 struct nouveau_bios *bios = nouveau_bios(device); 257 struct nouveau_bios *bios = nvkm_bios(device);
261 struct nvbios_pll pll_lim; 258 struct nvbios_pll pll_lim;
262 struct nouveau_pll_vals pv; 259 struct nouveau_pll_vals pv;
263 enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0; 260 enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
@@ -394,21 +391,21 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
394 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 391 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
395 int i; 392 int i;
396 393
397 if (nv_device(drm->device)->card_type >= NV_10) 394 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
398 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); 395 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
399 396
400 nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals); 397 nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
401 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); 398 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
402 if (nv_two_heads(dev)) 399 if (nv_two_heads(dev))
403 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); 400 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
404 if (nv_device(drm->device)->chipset == 0x11) 401 if (drm->device.info.chipset == 0x11)
405 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); 402 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
406 403
407 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); 404 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
408 405
409 if (nv_gf4_disp_arch(dev)) 406 if (nv_gf4_disp_arch(dev))
410 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); 407 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
411 if (nv_device(drm->device)->chipset >= 0x30) 408 if (drm->device.info.chipset >= 0x30)
412 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); 409 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
413 410
414 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); 411 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
@@ -450,7 +447,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
450 if (nv_gf4_disp_arch(dev)) 447 if (nv_gf4_disp_arch(dev))
451 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); 448 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
452 449
453 if (nv_device(drm->device)->card_type == NV_40) { 450 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
454 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); 451 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
455 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); 452 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
456 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); 453 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
@@ -466,26 +463,26 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
466 struct nv04_mode_state *state) 463 struct nv04_mode_state *state)
467{ 464{
468 struct nouveau_drm *drm = nouveau_drm(dev); 465 struct nouveau_drm *drm = nouveau_drm(dev);
469 struct nouveau_clock *clk = nouveau_clock(drm->device); 466 struct nouveau_clock *clk = nvkm_clock(&drm->device);
470 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 467 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
471 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; 468 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
472 int i; 469 int i;
473 470
474 if (nv_device(drm->device)->card_type >= NV_10) 471 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
475 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); 472 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
476 473
477 clk->pll_prog(clk, pllreg, &regp->pllvals); 474 clk->pll_prog(clk, pllreg, &regp->pllvals);
478 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); 475 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
479 if (nv_two_heads(dev)) 476 if (nv_two_heads(dev))
480 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); 477 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
481 if (nv_device(drm->device)->chipset == 0x11) 478 if (drm->device.info.chipset == 0x11)
482 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); 479 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
483 480
484 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); 481 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
485 482
486 if (nv_gf4_disp_arch(dev)) 483 if (nv_gf4_disp_arch(dev))
487 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); 484 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
488 if (nv_device(drm->device)->chipset >= 0x30) 485 if (drm->device.info.chipset >= 0x30)
489 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); 486 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
490 487
491 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); 488 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
@@ -522,7 +519,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
522 if (nv_gf4_disp_arch(dev)) 519 if (nv_gf4_disp_arch(dev))
523 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); 520 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
524 521
525 if (nv_device(drm->device)->card_type == NV_40) { 522 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
526 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); 523 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
527 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); 524 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
528 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); 525 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
@@ -603,10 +600,10 @@ nv_save_state_ext(struct drm_device *dev, int head,
603 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); 600 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
604 rd_cio_state(dev, head, regp, NV_CIO_CRE_21); 601 rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
605 602
606 if (nv_device(drm->device)->card_type >= NV_20) 603 if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
607 rd_cio_state(dev, head, regp, NV_CIO_CRE_47); 604 rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
608 605
609 if (nv_device(drm->device)->card_type >= NV_30) 606 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
610 rd_cio_state(dev, head, regp, 0x9f); 607 rd_cio_state(dev, head, regp, 0x9f);
611 608
612 rd_cio_state(dev, head, regp, NV_CIO_CRE_49); 609 rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
@@ -615,14 +612,14 @@ nv_save_state_ext(struct drm_device *dev, int head,
615 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 612 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
616 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); 613 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
617 614
618 if (nv_device(drm->device)->card_type >= NV_10) { 615 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
619 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); 616 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
620 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); 617 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
621 618
622 if (nv_device(drm->device)->card_type >= NV_30) 619 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
623 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); 620 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
624 621
625 if (nv_device(drm->device)->card_type == NV_40) 622 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
626 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); 623 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
627 624
628 if (nv_two_heads(dev)) 625 if (nv_two_heads(dev))
@@ -634,7 +631,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
634 631
635 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); 632 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
636 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); 633 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
637 if (nv_device(drm->device)->card_type >= NV_10) { 634 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
638 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); 635 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
639 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); 636 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
640 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); 637 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -663,14 +660,13 @@ nv_load_state_ext(struct drm_device *dev, int head,
663 struct nv04_mode_state *state) 660 struct nv04_mode_state *state)
664{ 661{
665 struct nouveau_drm *drm = nouveau_drm(dev); 662 struct nouveau_drm *drm = nouveau_drm(dev);
666 struct nouveau_device *device = nv_device(drm->device); 663 struct nvif_device *device = &drm->device;
667 struct nouveau_timer *ptimer = nouveau_timer(device); 664 struct nouveau_timer *ptimer = nvkm_timer(device);
668 struct nouveau_fb *pfb = nouveau_fb(device);
669 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 665 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
670 uint32_t reg900; 666 uint32_t reg900;
671 int i; 667 int i;
672 668
673 if (nv_device(drm->device)->card_type >= NV_10) { 669 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
674 if (nv_two_heads(dev)) 670 if (nv_two_heads(dev))
675 /* setting ENGINE_CTRL (EC) *must* come before 671 /* setting ENGINE_CTRL (EC) *must* come before
676 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in 672 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
@@ -678,24 +674,24 @@ nv_load_state_ext(struct drm_device *dev, int head,
678 */ 674 */
679 NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); 675 NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
680 676
681 nv_wr32(device, NV_PVIDEO_STOP, 1); 677 nvif_wr32(device, NV_PVIDEO_STOP, 1);
682 nv_wr32(device, NV_PVIDEO_INTR_EN, 0); 678 nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
683 nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); 679 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
684 nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); 680 nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
685 nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1); 681 nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1);
686 nv_wr32(device, NV_PVIDEO_LIMIT(1), pfb->ram->size - 1); 682 nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1);
687 nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), pfb->ram->size - 1); 683 nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1);
688 nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), pfb->ram->size - 1); 684 nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1);
689 nv_wr32(device, NV_PBUS_POWERCTRL_2, 0); 685 nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
690 686
691 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); 687 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
692 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); 688 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
693 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); 689 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
694 690
695 if (nv_device(drm->device)->card_type >= NV_30) 691 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
696 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); 692 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
697 693
698 if (nv_device(drm->device)->card_type == NV_40) { 694 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
699 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); 695 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
700 696
701 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); 697 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
@@ -718,23 +714,23 @@ nv_load_state_ext(struct drm_device *dev, int head,
718 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); 714 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
719 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); 715 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
720 716
721 if (nv_device(drm->device)->card_type >= NV_20) 717 if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
722 wr_cio_state(dev, head, regp, NV_CIO_CRE_47); 718 wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
723 719
724 if (nv_device(drm->device)->card_type >= NV_30) 720 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
725 wr_cio_state(dev, head, regp, 0x9f); 721 wr_cio_state(dev, head, regp, 0x9f);
726 722
727 wr_cio_state(dev, head, regp, NV_CIO_CRE_49); 723 wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
728 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); 724 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
729 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); 725 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
730 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 726 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
731 if (nv_device(drm->device)->card_type == NV_40) 727 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
732 nv_fix_nv40_hw_cursor(dev, head); 728 nv_fix_nv40_hw_cursor(dev, head);
733 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); 729 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
734 730
735 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); 731 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
736 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); 732 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
737 if (nv_device(drm->device)->card_type >= NV_10) { 733 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
738 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); 734 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
739 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); 735 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
740 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); 736 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -742,7 +738,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
742 } 738 }
743 /* NV11 and NV20 stop at 0x52. */ 739 /* NV11 and NV20 stop at 0x52. */
744 if (nv_gf4_disp_arch(dev)) { 740 if (nv_gf4_disp_arch(dev)) {
745 if (nv_device(drm->device)->card_type < NV_20) { 741 if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
746 /* Not waiting for vertical retrace before modifying 742 /* Not waiting for vertical retrace before modifying
747 CRE_53/CRE_54 causes lockups. */ 743 CRE_53/CRE_54 causes lockups. */
748 nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); 744 nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
@@ -769,15 +765,15 @@ static void
769nv_save_state_palette(struct drm_device *dev, int head, 765nv_save_state_palette(struct drm_device *dev, int head,
770 struct nv04_mode_state *state) 766 struct nv04_mode_state *state)
771{ 767{
772 struct nouveau_device *device = nouveau_dev(dev); 768 struct nvif_device *device = &nouveau_drm(dev)->device;
773 int head_offset = head * NV_PRMDIO_SIZE, i; 769 int head_offset = head * NV_PRMDIO_SIZE, i;
774 770
775 nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, 771 nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
776 NV_PRMDIO_PIXEL_MASK_MASK); 772 NV_PRMDIO_PIXEL_MASK_MASK);
777 nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0); 773 nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
778 774
779 for (i = 0; i < 768; i++) { 775 for (i = 0; i < 768; i++) {
780 state->crtc_reg[head].DAC[i] = nv_rd08(device, 776 state->crtc_reg[head].DAC[i] = nvif_rd08(device,
781 NV_PRMDIO_PALETTE_DATA + head_offset); 777 NV_PRMDIO_PALETTE_DATA + head_offset);
782 } 778 }
783 779
@@ -788,15 +784,15 @@ void
788nouveau_hw_load_state_palette(struct drm_device *dev, int head, 784nouveau_hw_load_state_palette(struct drm_device *dev, int head,
789 struct nv04_mode_state *state) 785 struct nv04_mode_state *state)
790{ 786{
791 struct nouveau_device *device = nouveau_dev(dev); 787 struct nvif_device *device = &nouveau_drm(dev)->device;
792 int head_offset = head * NV_PRMDIO_SIZE, i; 788 int head_offset = head * NV_PRMDIO_SIZE, i;
793 789
794 nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, 790 nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
795 NV_PRMDIO_PIXEL_MASK_MASK); 791 NV_PRMDIO_PIXEL_MASK_MASK);
796 nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0); 792 nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
797 793
798 for (i = 0; i < 768; i++) { 794 for (i = 0; i < 768; i++) {
799 nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset, 795 nvif_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
800 state->crtc_reg[head].DAC[i]); 796 state->crtc_reg[head].DAC[i]);
801 } 797 }
802 798
@@ -808,7 +804,7 @@ void nouveau_hw_save_state(struct drm_device *dev, int head,
808{ 804{
809 struct nouveau_drm *drm = nouveau_drm(dev); 805 struct nouveau_drm *drm = nouveau_drm(dev);
810 806
811 if (nv_device(drm->device)->chipset == 0x11) 807 if (drm->device.info.chipset == 0x11)
812 /* NB: no attempt is made to restore the bad pll later on */ 808 /* NB: no attempt is made to restore the bad pll later on */
813 nouveau_hw_fix_bad_vpll(dev, head); 809 nouveau_hw_fix_bad_vpll(dev, head);
814 nv_save_state_ramdac(dev, head, state); 810 nv_save_state_ramdac(dev, head, state);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index eeb70d912d99..7f53c571f31f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -60,41 +60,41 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
60static inline uint32_t NVReadCRTC(struct drm_device *dev, 60static inline uint32_t NVReadCRTC(struct drm_device *dev,
61 int head, uint32_t reg) 61 int head, uint32_t reg)
62{ 62{
63 struct nouveau_device *device = nouveau_dev(dev); 63 struct nvif_device *device = &nouveau_drm(dev)->device;
64 uint32_t val; 64 uint32_t val;
65 if (head) 65 if (head)
66 reg += NV_PCRTC0_SIZE; 66 reg += NV_PCRTC0_SIZE;
67 val = nv_rd32(device, reg); 67 val = nvif_rd32(device, reg);
68 return val; 68 return val;
69} 69}
70 70
71static inline void NVWriteCRTC(struct drm_device *dev, 71static inline void NVWriteCRTC(struct drm_device *dev,
72 int head, uint32_t reg, uint32_t val) 72 int head, uint32_t reg, uint32_t val)
73{ 73{
74 struct nouveau_device *device = nouveau_dev(dev); 74 struct nvif_device *device = &nouveau_drm(dev)->device;
75 if (head) 75 if (head)
76 reg += NV_PCRTC0_SIZE; 76 reg += NV_PCRTC0_SIZE;
77 nv_wr32(device, reg, val); 77 nvif_wr32(device, reg, val);
78} 78}
79 79
80static inline uint32_t NVReadRAMDAC(struct drm_device *dev, 80static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
81 int head, uint32_t reg) 81 int head, uint32_t reg)
82{ 82{
83 struct nouveau_device *device = nouveau_dev(dev); 83 struct nvif_device *device = &nouveau_drm(dev)->device;
84 uint32_t val; 84 uint32_t val;
85 if (head) 85 if (head)
86 reg += NV_PRAMDAC0_SIZE; 86 reg += NV_PRAMDAC0_SIZE;
87 val = nv_rd32(device, reg); 87 val = nvif_rd32(device, reg);
88 return val; 88 return val;
89} 89}
90 90
91static inline void NVWriteRAMDAC(struct drm_device *dev, 91static inline void NVWriteRAMDAC(struct drm_device *dev,
92 int head, uint32_t reg, uint32_t val) 92 int head, uint32_t reg, uint32_t val)
93{ 93{
94 struct nouveau_device *device = nouveau_dev(dev); 94 struct nvif_device *device = &nouveau_drm(dev)->device;
95 if (head) 95 if (head)
96 reg += NV_PRAMDAC0_SIZE; 96 reg += NV_PRAMDAC0_SIZE;
97 nv_wr32(device, reg, val); 97 nvif_wr32(device, reg, val);
98} 98}
99 99
100static inline uint8_t nv_read_tmds(struct drm_device *dev, 100static inline uint8_t nv_read_tmds(struct drm_device *dev,
@@ -120,18 +120,18 @@ static inline void nv_write_tmds(struct drm_device *dev,
120static inline void NVWriteVgaCrtc(struct drm_device *dev, 120static inline void NVWriteVgaCrtc(struct drm_device *dev,
121 int head, uint8_t index, uint8_t value) 121 int head, uint8_t index, uint8_t value)
122{ 122{
123 struct nouveau_device *device = nouveau_dev(dev); 123 struct nvif_device *device = &nouveau_drm(dev)->device;
124 nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); 124 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
125 nv_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); 125 nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
126} 126}
127 127
128static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, 128static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
129 int head, uint8_t index) 129 int head, uint8_t index)
130{ 130{
131 struct nouveau_device *device = nouveau_dev(dev); 131 struct nvif_device *device = &nouveau_drm(dev)->device;
132 uint8_t val; 132 uint8_t val;
133 nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); 133 nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
134 val = nv_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); 134 val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
135 return val; 135 return val;
136} 136}
137 137
@@ -165,74 +165,74 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
165static inline uint8_t NVReadPRMVIO(struct drm_device *dev, 165static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
166 int head, uint32_t reg) 166 int head, uint32_t reg)
167{ 167{
168 struct nouveau_device *device = nouveau_dev(dev); 168 struct nvif_device *device = &nouveau_drm(dev)->device;
169 struct nouveau_drm *drm = nouveau_drm(dev); 169 struct nouveau_drm *drm = nouveau_drm(dev);
170 uint8_t val; 170 uint8_t val;
171 171
172 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call 172 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
173 * NVSetOwner for the relevant head to be programmed */ 173 * NVSetOwner for the relevant head to be programmed */
174 if (head && nv_device(drm->device)->card_type == NV_40) 174 if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
175 reg += NV_PRMVIO_SIZE; 175 reg += NV_PRMVIO_SIZE;
176 176
177 val = nv_rd08(device, reg); 177 val = nvif_rd08(device, reg);
178 return val; 178 return val;
179} 179}
180 180
181static inline void NVWritePRMVIO(struct drm_device *dev, 181static inline void NVWritePRMVIO(struct drm_device *dev,
182 int head, uint32_t reg, uint8_t value) 182 int head, uint32_t reg, uint8_t value)
183{ 183{
184 struct nouveau_device *device = nouveau_dev(dev); 184 struct nvif_device *device = &nouveau_drm(dev)->device;
185 struct nouveau_drm *drm = nouveau_drm(dev); 185 struct nouveau_drm *drm = nouveau_drm(dev);
186 186
187 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call 187 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
188 * NVSetOwner for the relevant head to be programmed */ 188 * NVSetOwner for the relevant head to be programmed */
189 if (head && nv_device(drm->device)->card_type == NV_40) 189 if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
190 reg += NV_PRMVIO_SIZE; 190 reg += NV_PRMVIO_SIZE;
191 191
192 nv_wr08(device, reg, value); 192 nvif_wr08(device, reg, value);
193} 193}
194 194
195static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) 195static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
196{ 196{
197 struct nouveau_device *device = nouveau_dev(dev); 197 struct nvif_device *device = &nouveau_drm(dev)->device;
198 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 198 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
199 nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); 199 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
200} 200}
201 201
202static inline bool NVGetEnablePalette(struct drm_device *dev, int head) 202static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
203{ 203{
204 struct nouveau_device *device = nouveau_dev(dev); 204 struct nvif_device *device = &nouveau_drm(dev)->device;
205 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 205 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
206 return !(nv_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); 206 return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
207} 207}
208 208
209static inline void NVWriteVgaAttr(struct drm_device *dev, 209static inline void NVWriteVgaAttr(struct drm_device *dev,
210 int head, uint8_t index, uint8_t value) 210 int head, uint8_t index, uint8_t value)
211{ 211{
212 struct nouveau_device *device = nouveau_dev(dev); 212 struct nvif_device *device = &nouveau_drm(dev)->device;
213 if (NVGetEnablePalette(dev, head)) 213 if (NVGetEnablePalette(dev, head))
214 index &= ~0x20; 214 index &= ~0x20;
215 else 215 else
216 index |= 0x20; 216 index |= 0x20;
217 217
218 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 218 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
219 nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); 219 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
220 nv_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value); 220 nvif_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
221} 221}
222 222
223static inline uint8_t NVReadVgaAttr(struct drm_device *dev, 223static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
224 int head, uint8_t index) 224 int head, uint8_t index)
225{ 225{
226 struct nouveau_device *device = nouveau_dev(dev); 226 struct nvif_device *device = &nouveau_drm(dev)->device;
227 uint8_t val; 227 uint8_t val;
228 if (NVGetEnablePalette(dev, head)) 228 if (NVGetEnablePalette(dev, head))
229 index &= ~0x20; 229 index &= ~0x20;
230 else 230 else
231 index |= 0x20; 231 index |= 0x20;
232 232
233 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 233 nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
234 nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); 234 nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
235 val = nv_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); 235 val = nvif_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
236 return val; 236 return val;
237} 237}
238 238
@@ -259,11 +259,11 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
259static inline bool 259static inline bool
260nv_heads_tied(struct drm_device *dev) 260nv_heads_tied(struct drm_device *dev)
261{ 261{
262 struct nouveau_device *device = nouveau_dev(dev); 262 struct nvif_device *device = &nouveau_drm(dev)->device;
263 struct nouveau_drm *drm = nouveau_drm(dev); 263 struct nouveau_drm *drm = nouveau_drm(dev);
264 264
265 if (nv_device(drm->device)->chipset == 0x11) 265 if (drm->device.info.chipset == 0x11)
266 return !!(nv_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28)); 266 return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
267 267
268 return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; 268 return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
269} 269}
@@ -318,7 +318,7 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock)
318 NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, 318 NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
319 lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); 319 lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
320 /* NV11 has independently lockable extended crtcs, except when tied */ 320 /* NV11 has independently lockable extended crtcs, except when tied */
321 if (nv_device(drm->device)->chipset == 0x11 && !nv_heads_tied(dev)) 321 if (drm->device.info.chipset == 0x11 && !nv_heads_tied(dev))
322 NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, 322 NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
323 lock ? NV_CIO_SR_LOCK_VALUE : 323 lock ? NV_CIO_SR_LOCK_VALUE :
324 NV_CIO_SR_UNLOCK_RW_VALUE); 324 NV_CIO_SR_UNLOCK_RW_VALUE);
@@ -335,7 +335,7 @@ static inline int nv_cursor_width(struct drm_device *dev)
335{ 335{
336 struct nouveau_drm *drm = nouveau_drm(dev); 336 struct nouveau_drm *drm = nouveau_drm(dev);
337 337
338 return nv_device(drm->device)->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; 338 return drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
339} 339}
340 340
341static inline void 341static inline void
@@ -357,7 +357,7 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
357 357
358 NVWriteCRTC(dev, head, NV_PCRTC_START, offset); 358 NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
359 359
360 if (nv_device(drm->device)->card_type == NV_04) { 360 if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) {
361 /* 361 /*
362 * Hilarious, the 24th bit doesn't want to stick to 362 * Hilarious, the 24th bit doesn't want to stick to
363 * PCRTC_START... 363 * PCRTC_START...
@@ -382,7 +382,7 @@ nv_show_cursor(struct drm_device *dev, int head, bool show)
382 *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); 382 *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
383 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); 383 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
384 384
385 if (nv_device(drm->device)->card_type == NV_40) 385 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
386 nv_fix_nv40_hw_cursor(dev, head); 386 nv_fix_nv40_hw_cursor(dev, head);
387} 387}
388 388
@@ -398,7 +398,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
398 bpp = 8; 398 bpp = 8;
399 399
400 /* Alignment requirements taken from the Haiku driver */ 400 /* Alignment requirements taken from the Haiku driver */
401 if (nv_device(drm->device)->card_type == NV_04) 401 if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
402 mask = 128 / bpp - 1; 402 mask = 128 / bpp - 1;
403 else 403 else
404 mask = 512 / bpp - 1; 404 mask = 512 / bpp - 1;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index ab03f7719d2d..b36afcbbc83f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -96,7 +96,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
96 uint32_t src_x, uint32_t src_y, 96 uint32_t src_x, uint32_t src_y,
97 uint32_t src_w, uint32_t src_h) 97 uint32_t src_w, uint32_t src_h)
98{ 98{
99 struct nouveau_device *dev = nouveau_dev(plane->dev); 99 struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
100 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; 100 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
101 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 101 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
102 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 102 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -117,7 +117,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
117 if (format > 0xffff) 117 if (format > 0xffff)
118 return -ERANGE; 118 return -ERANGE;
119 119
120 if (dev->chipset >= 0x30) { 120 if (dev->info.chipset >= 0x30) {
121 if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) 121 if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
122 return -ERANGE; 122 return -ERANGE;
123 } else { 123 } else {
@@ -131,17 +131,17 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
131 131
132 nv_plane->cur = nv_fb->nvbo; 132 nv_plane->cur = nv_fb->nvbo;
133 133
134 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); 134 nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
135 nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); 135 nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
136 136
137 nv_wr32(dev, NV_PVIDEO_BASE(flip), 0); 137 nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0);
138 nv_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset); 138 nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
139 nv_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w); 139 nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
140 nv_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x); 140 nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
141 nv_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w); 141 nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
142 nv_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h); 142 nvif_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
143 nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x); 143 nvif_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
144 nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w); 144 nvif_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
145 145
146 if (fb->pixel_format != DRM_FORMAT_UYVY) 146 if (fb->pixel_format != DRM_FORMAT_UYVY)
147 format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8; 147 format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
@@ -153,14 +153,14 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
153 format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY; 153 format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
154 154
155 if (fb->pixel_format == DRM_FORMAT_NV12) { 155 if (fb->pixel_format == DRM_FORMAT_NV12) {
156 nv_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0); 156 nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
157 nv_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip), 157 nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
158 nv_fb->nvbo->bo.offset + fb->offsets[1]); 158 nv_fb->nvbo->bo.offset + fb->offsets[1]);
159 } 159 }
160 nv_wr32(dev, NV_PVIDEO_FORMAT(flip), format); 160 nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
161 nv_wr32(dev, NV_PVIDEO_STOP, 0); 161 nvif_wr32(dev, NV_PVIDEO_STOP, 0);
162 /* TODO: wait for vblank? */ 162 /* TODO: wait for vblank? */
163 nv_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1); 163 nvif_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
164 nv_plane->flip = !flip; 164 nv_plane->flip = !flip;
165 165
166 if (cur) 166 if (cur)
@@ -172,10 +172,10 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
172static int 172static int
173nv10_disable_plane(struct drm_plane *plane) 173nv10_disable_plane(struct drm_plane *plane)
174{ 174{
175 struct nouveau_device *dev = nouveau_dev(plane->dev); 175 struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
176 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; 176 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
177 177
178 nv_wr32(dev, NV_PVIDEO_STOP, 1); 178 nvif_wr32(dev, NV_PVIDEO_STOP, 1);
179 if (nv_plane->cur) { 179 if (nv_plane->cur) {
180 nouveau_bo_unpin(nv_plane->cur); 180 nouveau_bo_unpin(nv_plane->cur);
181 nv_plane->cur = NULL; 181 nv_plane->cur = NULL;
@@ -195,24 +195,24 @@ nv_destroy_plane(struct drm_plane *plane)
195static void 195static void
196nv10_set_params(struct nouveau_plane *plane) 196nv10_set_params(struct nouveau_plane *plane)
197{ 197{
198 struct nouveau_device *dev = nouveau_dev(plane->base.dev); 198 struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device;
199 u32 luma = (plane->brightness - 512) << 16 | plane->contrast; 199 u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
200 u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) | 200 u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
201 (cos_mul(plane->hue, plane->saturation) & 0xffff); 201 (cos_mul(plane->hue, plane->saturation) & 0xffff);
202 u32 format = 0; 202 u32 format = 0;
203 203
204 nv_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma); 204 nvif_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
205 nv_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma); 205 nvif_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
206 nv_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma); 206 nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
207 nv_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma); 207 nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
208 nv_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff); 208 nvif_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
209 209
210 if (plane->cur) { 210 if (plane->cur) {
211 if (plane->iturbt_709) 211 if (plane->iturbt_709)
212 format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709; 212 format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
213 if (plane->colorkey & (1 << 24)) 213 if (plane->colorkey & (1 << 24))
214 format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY; 214 format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
215 nv_mask(dev, NV_PVIDEO_FORMAT(plane->flip), 215 nvif_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
216 NV_PVIDEO_FORMAT_MATRIX_ITURBT709 | 216 NV_PVIDEO_FORMAT_MATRIX_ITURBT709 |
217 NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY, 217 NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY,
218 format); 218 format);
@@ -256,7 +256,7 @@ static const struct drm_plane_funcs nv10_plane_funcs = {
256static void 256static void
257nv10_overlay_init(struct drm_device *device) 257nv10_overlay_init(struct drm_device *device)
258{ 258{
259 struct nouveau_device *dev = nouveau_dev(device); 259 struct nouveau_drm *drm = nouveau_drm(device);
260 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); 260 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
261 int num_formats = ARRAY_SIZE(formats); 261 int num_formats = ARRAY_SIZE(formats);
262 int ret; 262 int ret;
@@ -264,7 +264,7 @@ nv10_overlay_init(struct drm_device *device)
264 if (!plane) 264 if (!plane)
265 return; 265 return;
266 266
267 switch (dev->chipset) { 267 switch (drm->device.info.chipset) {
268 case 0x10: 268 case 0x10:
269 case 0x11: 269 case 0x11:
270 case 0x15: 270 case 0x15:
@@ -333,7 +333,7 @@ cleanup:
333 drm_plane_cleanup(&plane->base); 333 drm_plane_cleanup(&plane->base);
334err: 334err:
335 kfree(plane); 335 kfree(plane);
336 nv_error(dev, "Failed to create plane\n"); 336 NV_ERROR(drm, "Failed to create plane\n");
337} 337}
338 338
339static int 339static int
@@ -343,7 +343,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
343 uint32_t src_x, uint32_t src_y, 343 uint32_t src_x, uint32_t src_y,
344 uint32_t src_w, uint32_t src_h) 344 uint32_t src_w, uint32_t src_h)
345{ 345{
346 struct nouveau_device *dev = nouveau_dev(plane->dev); 346 struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
347 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; 347 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
348 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 348 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
349 struct nouveau_bo *cur = nv_plane->cur; 349 struct nouveau_bo *cur = nv_plane->cur;
@@ -375,43 +375,43 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
375 375
376 nv_plane->cur = nv_fb->nvbo; 376 nv_plane->cur = nv_fb->nvbo;
377 377
378 nv_wr32(dev, NV_PVIDEO_OE_STATE, 0); 378 nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
379 nv_wr32(dev, NV_PVIDEO_SU_STATE, 0); 379 nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
380 nv_wr32(dev, NV_PVIDEO_RM_STATE, 0); 380 nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0);
381 381
382 for (i = 0; i < 2; i++) { 382 for (i = 0; i < 2; i++) {
383 nv_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i, 383 nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
384 nv_fb->nvbo->bo.offset); 384 nv_fb->nvbo->bo.offset);
385 nv_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch); 385 nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
386 nv_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0); 386 nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
387 } 387 }
388 nv_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x); 388 nvif_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
389 nv_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w); 389 nvif_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w);
390 nv_wr32(dev, NV_PVIDEO_STEP_SIZE, 390 nvif_wr32(dev, NV_PVIDEO_STEP_SIZE,
391 (uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1))); 391 (uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1)));
392 392
393 /* It should be possible to convert hue/contrast to this */ 393 /* It should be possible to convert hue/contrast to this */
394 nv_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness); 394 nvif_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness);
395 nv_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness); 395 nvif_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness);
396 nv_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness); 396 nvif_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness);
397 nv_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0); 397 nvif_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0);
398 398
399 nv_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */ 399 nvif_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */
400 nv_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */ 400 nvif_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */
401 401
402 nv_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03); 402 nvif_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03);
403 nv_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38); 403 nvif_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38);
404 404
405 nv_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey); 405 nvif_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey);
406 406
407 if (nv_plane->colorkey & (1 << 24)) 407 if (nv_plane->colorkey & (1 << 24))
408 overlay |= 0x10; 408 overlay |= 0x10;
409 if (fb->pixel_format == DRM_FORMAT_YUYV) 409 if (fb->pixel_format == DRM_FORMAT_YUYV)
410 overlay |= 0x100; 410 overlay |= 0x100;
411 411
412 nv_wr32(dev, NV_PVIDEO_OVERLAY, overlay); 412 nvif_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
413 413
414 nv_wr32(dev, NV_PVIDEO_SU_STATE, nv_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16)); 414 nvif_wr32(dev, NV_PVIDEO_SU_STATE, nvif_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16));
415 415
416 if (cur) 416 if (cur)
417 nouveau_bo_unpin(cur); 417 nouveau_bo_unpin(cur);
@@ -422,13 +422,13 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
422static int 422static int
423nv04_disable_plane(struct drm_plane *plane) 423nv04_disable_plane(struct drm_plane *plane)
424{ 424{
425 struct nouveau_device *dev = nouveau_dev(plane->dev); 425 struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
426 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; 426 struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
427 427
428 nv_mask(dev, NV_PVIDEO_OVERLAY, 1, 0); 428 nvif_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
429 nv_wr32(dev, NV_PVIDEO_OE_STATE, 0); 429 nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
430 nv_wr32(dev, NV_PVIDEO_SU_STATE, 0); 430 nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
431 nv_wr32(dev, NV_PVIDEO_RM_STATE, 0); 431 nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0);
432 if (nv_plane->cur) { 432 if (nv_plane->cur) {
433 nouveau_bo_unpin(nv_plane->cur); 433 nouveau_bo_unpin(nv_plane->cur);
434 nv_plane->cur = NULL; 434 nv_plane->cur = NULL;
@@ -447,7 +447,7 @@ static const struct drm_plane_funcs nv04_plane_funcs = {
447static void 447static void
448nv04_overlay_init(struct drm_device *device) 448nv04_overlay_init(struct drm_device *device)
449{ 449{
450 struct nouveau_device *dev = nouveau_dev(device); 450 struct nouveau_drm *drm = nouveau_drm(device);
451 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); 451 struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
452 int ret; 452 int ret;
453 453
@@ -483,15 +483,15 @@ cleanup:
483 drm_plane_cleanup(&plane->base); 483 drm_plane_cleanup(&plane->base);
484err: 484err:
485 kfree(plane); 485 kfree(plane);
486 nv_error(dev, "Failed to create plane\n"); 486 NV_ERROR(drm, "Failed to create plane\n");
487} 487}
488 488
489void 489void
490nouveau_overlay_init(struct drm_device *device) 490nouveau_overlay_init(struct drm_device *device)
491{ 491{
492 struct nouveau_device *dev = nouveau_dev(device); 492 struct nvif_device *dev = &nouveau_drm(device)->device;
493 if (dev->chipset < 0x10) 493 if (dev->info.chipset < 0x10)
494 nv04_overlay_init(device); 494 nv04_overlay_init(device);
495 else if (dev->chipset <= 0x40) 495 else if (dev->info.chipset <= 0x40)
496 nv10_overlay_init(device); 496 nv10_overlay_init(device);
497} 497}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 8667620b703a..8061d8d0ce79 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -35,8 +35,6 @@
35 35
36#include <drm/i2c/ch7006.h> 36#include <drm/i2c/ch7006.h>
37 37
38#include <subdev/i2c.h>
39
40static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = { 38static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
41 { 39 {
42 { 40 {
@@ -56,7 +54,7 @@ static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
56int nv04_tv_identify(struct drm_device *dev, int i2c_index) 54int nv04_tv_identify(struct drm_device *dev, int i2c_index)
57{ 55{
58 struct nouveau_drm *drm = nouveau_drm(dev); 56 struct nouveau_drm *drm = nouveau_drm(dev);
59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 57 struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
60 58
61 return i2c->identify(i2c, i2c_index, "TV encoder", 59 return i2c->identify(i2c, i2c_index, "TV encoder",
62 nv04_tv_encoder_info, NULL, NULL); 60 nv04_tv_encoder_info, NULL, NULL);
@@ -206,7 +204,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
206 struct drm_encoder *encoder; 204 struct drm_encoder *encoder;
207 struct drm_device *dev = connector->dev; 205 struct drm_device *dev = connector->dev;
208 struct nouveau_drm *drm = nouveau_drm(dev); 206 struct nouveau_drm *drm = nouveau_drm(dev);
209 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 207 struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
210 struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index); 208 struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
211 int type, ret; 209 int type, ret;
212 210
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 195bd8e86c6a..72d2ab04db47 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -34,11 +34,6 @@
34#include "hw.h" 34#include "hw.h"
35#include "tvnv17.h" 35#include "tvnv17.h"
36 36
37#include <core/device.h>
38
39#include <subdev/bios/gpio.h>
40#include <subdev/gpio.h>
41
42MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 37MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
43 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 38 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
44 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" 39 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -51,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
51{ 46{
52 struct drm_device *dev = encoder->dev; 47 struct drm_device *dev = encoder->dev;
53 struct nouveau_drm *drm = nouveau_drm(dev); 48 struct nouveau_drm *drm = nouveau_drm(dev);
54 struct nouveau_gpio *gpio = nouveau_gpio(drm->device); 49 struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
55 uint32_t testval, regoffset = nv04_dac_output_offset(encoder); 50 uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
56 uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, 51 uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
57 fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; 52 fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -135,17 +130,17 @@ static bool
135get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) 130get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
136{ 131{
137 struct nouveau_drm *drm = nouveau_drm(dev); 132 struct nouveau_drm *drm = nouveau_drm(dev);
138 struct nouveau_object *device = drm->device; 133 struct nvif_device *device = &drm->device;
139 134
140 /* Zotac FX5200 */ 135 /* Zotac FX5200 */
141 if (nv_device_match(device, 0x0322, 0x19da, 0x1035) || 136 if (nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x1035) ||
142 nv_device_match(device, 0x0322, 0x19da, 0x2035)) { 137 nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x2035)) {
143 *pin_mask = 0xc; 138 *pin_mask = 0xc;
144 return false; 139 return false;
145 } 140 }
146 141
147 /* MSI nForce2 IGP */ 142 /* MSI nForce2 IGP */
148 if (nv_device_match(device, 0x01f0, 0x1462, 0x5710)) { 143 if (nv_device_match(nvkm_object(device), 0x01f0, 0x1462, 0x5710)) {
149 *pin_mask = 0xc; 144 *pin_mask = 0xc;
150 return false; 145 return false;
151 } 146 }
@@ -167,8 +162,8 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
167 return connector_status_disconnected; 162 return connector_status_disconnected;
168 163
169 if (reliable) { 164 if (reliable) {
170 if (nv_device(drm->device)->chipset == 0x42 || 165 if (drm->device.info.chipset == 0x42 ||
171 nv_device(drm->device)->chipset == 0x43) 166 drm->device.info.chipset == 0x43)
172 tv_enc->pin_mask = 167 tv_enc->pin_mask =
173 nv42_tv_sample_load(encoder) >> 28 & 0xe; 168 nv42_tv_sample_load(encoder) >> 28 & 0xe;
174 else 169 else
@@ -375,7 +370,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
375{ 370{
376 struct drm_device *dev = encoder->dev; 371 struct drm_device *dev = encoder->dev;
377 struct nouveau_drm *drm = nouveau_drm(dev); 372 struct nouveau_drm *drm = nouveau_drm(dev);
378 struct nouveau_gpio *gpio = nouveau_gpio(drm->device); 373 struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
379 struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; 374 struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
380 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); 375 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
381 376
@@ -448,7 +443,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
448 /* Set the DACCLK register */ 443 /* Set the DACCLK register */
449 dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; 444 dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
450 445
451 if (nv_device(drm->device)->card_type == NV_40) 446 if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
452 dacclk |= 0x1a << 16; 447 dacclk |= 0x1a << 16;
453 448
454 if (tv_norm->kind == CTV_ENC_MODE) { 449 if (tv_norm->kind == CTV_ENC_MODE) {
@@ -505,7 +500,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
505 tv_regs->ptv_614 = 0x13; 500 tv_regs->ptv_614 = 0x13;
506 } 501 }
507 502
508 if (nv_device(drm->device)->card_type >= NV_30) { 503 if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) {
509 tv_regs->ptv_500 = 0xe8e0; 504 tv_regs->ptv_500 = 0xe8e0;
510 tv_regs->ptv_504 = 0x1710; 505 tv_regs->ptv_504 = 0x1710;
511 tv_regs->ptv_604 = 0x0; 506 tv_regs->ptv_604 = 0x0;
@@ -600,7 +595,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
600 nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); 595 nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
601 596
602 /* This could use refinement for flatpanels, but it should work */ 597 /* This could use refinement for flatpanels, but it should work */
603 if (nv_device(drm->device)->chipset < 0x44) 598 if (drm->device.info.chipset < 0x44)
604 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + 599 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
605 nv04_dac_output_offset(encoder), 600 nv04_dac_output_offset(encoder),
606 0xf0000000); 601 0xf0000000);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 7b331543a41b..225894cdcac2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -130,14 +130,14 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
130static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, 130static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
131 uint32_t val) 131 uint32_t val)
132{ 132{
133 struct nouveau_device *device = nouveau_dev(dev); 133 struct nvif_device *device = &nouveau_drm(dev)->device;
134 nv_wr32(device, reg, val); 134 nvif_wr32(device, reg, val);
135} 135}
136 136
137static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) 137static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
138{ 138{
139 struct nouveau_device *device = nouveau_dev(dev); 139 struct nvif_device *device = &nouveau_drm(dev)->device;
140 return nv_rd32(device, reg); 140 return nvif_rd32(device, reg);
141} 141}
142 142
143static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, 143static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index b13f441c6431..615714c1727d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -21,16 +21,10 @@
21 * 21 *
22 */ 22 */
23 23
24#include <core/object.h> 24#include <nvif/client.h>
25#include <core/client.h> 25#include <nvif/driver.h>
26#include <core/device.h> 26#include <nvif/ioctl.h>
27#include <core/class.h> 27#include <nvif/class.h>
28#include <core/mm.h>
29
30#include <subdev/fb.h>
31#include <subdev/timer.h>
32#include <subdev/instmem.h>
33#include <engine/graph.h>
34 28
35#include "nouveau_drm.h" 29#include "nouveau_drm.h"
36#include "nouveau_dma.h" 30#include "nouveau_dma.h"
@@ -47,20 +41,20 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
47 struct nouveau_abi16 *abi16; 41 struct nouveau_abi16 *abi16;
48 cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL); 42 cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
49 if (cli->abi16) { 43 if (cli->abi16) {
44 struct nv_device_v0 args = {
45 .device = ~0ULL,
46 };
47
50 INIT_LIST_HEAD(&abi16->channels); 48 INIT_LIST_HEAD(&abi16->channels);
51 abi16->client = nv_object(cli);
52 49
53 /* allocate device object targeting client's default 50 /* allocate device object targeting client's default
54 * device (ie. the one that belongs to the fd it 51 * device (ie. the one that belongs to the fd it
55 * opened) 52 * opened)
56 */ 53 */
57 if (nouveau_object_new(abi16->client, NVDRM_CLIENT, 54 if (nvif_device_init(&cli->base.base, NULL,
58 NVDRM_DEVICE, 0x0080, 55 NOUVEAU_ABI16_DEVICE, NV_DEVICE,
59 &(struct nv_device_class) { 56 &args, sizeof(args),
60 .device = ~0ULL, 57 &abi16->device) == 0)
61 },
62 sizeof(struct nv_device_class),
63 &abi16->device) == 0)
64 return cli->abi16; 58 return cli->abi16;
65 59
66 kfree(cli->abi16); 60 kfree(cli->abi16);
@@ -75,7 +69,7 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
75int 69int
76nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) 70nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
77{ 71{
78 struct nouveau_cli *cli = (void *)abi16->client; 72 struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
79 mutex_unlock(&cli->mutex); 73 mutex_unlock(&cli->mutex);
80 return ret; 74 return ret;
81} 75}
@@ -83,21 +77,19 @@ nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
83u16 77u16
84nouveau_abi16_swclass(struct nouveau_drm *drm) 78nouveau_abi16_swclass(struct nouveau_drm *drm)
85{ 79{
86 switch (nv_device(drm->device)->card_type) { 80 switch (drm->device.info.family) {
87 case NV_04: 81 case NV_DEVICE_INFO_V0_TNT:
88 return 0x006e; 82 return 0x006e;
89 case NV_10: 83 case NV_DEVICE_INFO_V0_CELSIUS:
90 case NV_11: 84 case NV_DEVICE_INFO_V0_KELVIN:
91 case NV_20: 85 case NV_DEVICE_INFO_V0_RANKINE:
92 case NV_30: 86 case NV_DEVICE_INFO_V0_CURIE:
93 case NV_40:
94 return 0x016e; 87 return 0x016e;
95 case NV_50: 88 case NV_DEVICE_INFO_V0_TESLA:
96 return 0x506e; 89 return 0x506e;
97 case NV_C0: 90 case NV_DEVICE_INFO_V0_FERMI:
98 case NV_D0: 91 case NV_DEVICE_INFO_V0_KEPLER:
99 case NV_E0: 92 case NV_DEVICE_INFO_V0_MAXWELL:
100 case GM100:
101 return 0x906e; 93 return 0x906e;
102 } 94 }
103 95
@@ -140,7 +132,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
140 132
141 /* destroy channel object, all children will be killed too */ 133 /* destroy channel object, all children will be killed too */
142 if (chan->chan) { 134 if (chan->chan) {
143 abi16->handles &= ~(1ULL << (chan->chan->handle & 0xffff)); 135 abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff));
144 nouveau_channel_del(&chan->chan); 136 nouveau_channel_del(&chan->chan);
145 } 137 }
146 138
@@ -151,7 +143,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
151void 143void
152nouveau_abi16_fini(struct nouveau_abi16 *abi16) 144nouveau_abi16_fini(struct nouveau_abi16 *abi16)
153{ 145{
154 struct nouveau_cli *cli = (void *)abi16->client; 146 struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
155 struct nouveau_abi16_chan *chan, *temp; 147 struct nouveau_abi16_chan *chan, *temp;
156 148
157 /* cleanup channels */ 149 /* cleanup channels */
@@ -160,7 +152,7 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16)
160 } 152 }
161 153
162 /* destroy the device object */ 154 /* destroy the device object */
163 nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE); 155 nvif_device_fini(&abi16->device);
164 156
165 kfree(cli->abi16); 157 kfree(cli->abi16);
166 cli->abi16 = NULL; 158 cli->abi16 = NULL;
@@ -169,30 +161,31 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16)
169int 161int
170nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) 162nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
171{ 163{
164 struct nouveau_cli *cli = nouveau_cli(file_priv);
172 struct nouveau_drm *drm = nouveau_drm(dev); 165 struct nouveau_drm *drm = nouveau_drm(dev);
173 struct nouveau_device *device = nv_device(drm->device); 166 struct nvif_device *device = &drm->device;
174 struct nouveau_timer *ptimer = nouveau_timer(device); 167 struct nouveau_timer *ptimer = nvkm_timer(device);
175 struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR); 168 struct nouveau_graph *graph = nvkm_gr(device);
176 struct drm_nouveau_getparam *getparam = data; 169 struct drm_nouveau_getparam *getparam = data;
177 170
178 switch (getparam->param) { 171 switch (getparam->param) {
179 case NOUVEAU_GETPARAM_CHIPSET_ID: 172 case NOUVEAU_GETPARAM_CHIPSET_ID:
180 getparam->value = device->chipset; 173 getparam->value = device->info.chipset;
181 break; 174 break;
182 case NOUVEAU_GETPARAM_PCI_VENDOR: 175 case NOUVEAU_GETPARAM_PCI_VENDOR:
183 if (nv_device_is_pci(device)) 176 if (nv_device_is_pci(nvkm_device(device)))
184 getparam->value = dev->pdev->vendor; 177 getparam->value = dev->pdev->vendor;
185 else 178 else
186 getparam->value = 0; 179 getparam->value = 0;
187 break; 180 break;
188 case NOUVEAU_GETPARAM_PCI_DEVICE: 181 case NOUVEAU_GETPARAM_PCI_DEVICE:
189 if (nv_device_is_pci(device)) 182 if (nv_device_is_pci(nvkm_device(device)))
190 getparam->value = dev->pdev->device; 183 getparam->value = dev->pdev->device;
191 else 184 else
192 getparam->value = 0; 185 getparam->value = 0;
193 break; 186 break;
194 case NOUVEAU_GETPARAM_BUS_TYPE: 187 case NOUVEAU_GETPARAM_BUS_TYPE:
195 if (!nv_device_is_pci(device)) 188 if (!nv_device_is_pci(nvkm_device(device)))
196 getparam->value = 3; 189 getparam->value = 3;
197 else 190 else
198 if (drm_pci_device_is_agp(dev)) 191 if (drm_pci_device_is_agp(dev))
@@ -225,7 +218,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
225 getparam->value = graph->units ? graph->units(graph) : 0; 218 getparam->value = graph->units ? graph->units(graph) : 0;
226 break; 219 break;
227 default: 220 default:
228 nv_debug(device, "unknown parameter %lld\n", getparam->param); 221 NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param);
229 return -EINVAL; 222 return -EINVAL;
230 } 223 }
231 224
@@ -246,10 +239,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
246 struct nouveau_drm *drm = nouveau_drm(dev); 239 struct nouveau_drm *drm = nouveau_drm(dev);
247 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 240 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
248 struct nouveau_abi16_chan *chan; 241 struct nouveau_abi16_chan *chan;
249 struct nouveau_client *client; 242 struct nvif_device *device;
250 struct nouveau_device *device;
251 struct nouveau_instmem *imem;
252 struct nouveau_fb *pfb;
253 int ret; 243 int ret;
254 244
255 if (unlikely(!abi16)) 245 if (unlikely(!abi16))
@@ -258,21 +248,18 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
258 if (!drm->channel) 248 if (!drm->channel)
259 return nouveau_abi16_put(abi16, -ENODEV); 249 return nouveau_abi16_put(abi16, -ENODEV);
260 250
261 client = nv_client(abi16->client); 251 device = &abi16->device;
262 device = nv_device(abi16->device);
263 imem = nouveau_instmem(device);
264 pfb = nouveau_fb(device);
265 252
266 /* hack to allow channel engine type specification on kepler */ 253 /* hack to allow channel engine type specification on kepler */
267 if (device->card_type >= NV_E0) { 254 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
268 if (init->fb_ctxdma_handle != ~0) 255 if (init->fb_ctxdma_handle != ~0)
269 init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR; 256 init->fb_ctxdma_handle = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
270 else 257 else
271 init->fb_ctxdma_handle = init->tt_ctxdma_handle; 258 init->fb_ctxdma_handle = init->tt_ctxdma_handle;
272 259
273 /* allow flips to be executed if this is a graphics channel */ 260 /* allow flips to be executed if this is a graphics channel */
274 init->tt_ctxdma_handle = 0; 261 init->tt_ctxdma_handle = 0;
275 if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR) 262 if (init->fb_ctxdma_handle == KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR)
276 init->tt_ctxdma_handle = 1; 263 init->tt_ctxdma_handle = 1;
277 } 264 }
278 265
@@ -293,13 +280,14 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
293 abi16->handles |= (1ULL << init->channel); 280 abi16->handles |= (1ULL << init->channel);
294 281
295 /* create channel object and initialise dma and fence management */ 282 /* create channel object and initialise dma and fence management */
296 ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN | 283 ret = nouveau_channel_new(drm, device,
297 init->channel, init->fb_ctxdma_handle, 284 NOUVEAU_ABI16_CHAN(init->channel),
285 init->fb_ctxdma_handle,
298 init->tt_ctxdma_handle, &chan->chan); 286 init->tt_ctxdma_handle, &chan->chan);
299 if (ret) 287 if (ret)
300 goto done; 288 goto done;
301 289
302 if (device->card_type >= NV_50) 290 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
303 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | 291 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
304 NOUVEAU_GEM_DOMAIN_GART; 292 NOUVEAU_GEM_DOMAIN_GART;
305 else 293 else
@@ -308,10 +296,10 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
308 else 296 else
309 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; 297 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
310 298
311 if (device->card_type < NV_10) { 299 if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
312 init->subchan[0].handle = 0x00000000; 300 init->subchan[0].handle = 0x00000000;
313 init->subchan[0].grclass = 0x0000; 301 init->subchan[0].grclass = 0x0000;
314 init->subchan[1].handle = NvSw; 302 init->subchan[1].handle = chan->chan->nvsw.handle;
315 init->subchan[1].grclass = 0x506e; 303 init->subchan[1].grclass = 0x506e;
316 init->nr_subchan = 2; 304 init->nr_subchan = 2;
317 } 305 }
@@ -324,8 +312,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
324 if (ret) 312 if (ret)
325 goto done; 313 goto done;
326 314
327 if (device->card_type >= NV_50) { 315 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
328 ret = nouveau_bo_vma_add(chan->ntfy, client->vm, 316 ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
329 &chan->ntfy_vma); 317 &chan->ntfy_vma);
330 if (ret) 318 if (ret)
331 goto done; 319 goto done;
@@ -343,6 +331,18 @@ done:
343 return nouveau_abi16_put(abi16, ret); 331 return nouveau_abi16_put(abi16, ret);
344} 332}
345 333
334static struct nouveau_abi16_chan *
335nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
336{
337 struct nouveau_abi16_chan *chan;
338
339 list_for_each_entry(chan, &abi16->channels, head) {
340 if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel))
341 return chan;
342 }
343
344 return NULL;
345}
346 346
347int 347int
348nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) 348nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
@@ -350,28 +350,38 @@ nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
350 struct drm_nouveau_channel_free *req = data; 350 struct drm_nouveau_channel_free *req = data;
351 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 351 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
352 struct nouveau_abi16_chan *chan; 352 struct nouveau_abi16_chan *chan;
353 int ret = -ENOENT;
354 353
355 if (unlikely(!abi16)) 354 if (unlikely(!abi16))
356 return -ENOMEM; 355 return -ENOMEM;
357 356
358 list_for_each_entry(chan, &abi16->channels, head) { 357 chan = nouveau_abi16_chan(abi16, req->channel);
359 if (chan->chan->handle == (NVDRM_CHAN | req->channel)) { 358 if (!chan)
360 nouveau_abi16_chan_fini(abi16, chan); 359 return nouveau_abi16_put(abi16, -ENOENT);
361 return nouveau_abi16_put(abi16, 0); 360 nouveau_abi16_chan_fini(abi16, chan);
362 } 361 return nouveau_abi16_put(abi16, 0);
363 }
364
365 return nouveau_abi16_put(abi16, ret);
366} 362}
367 363
368int 364int
369nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) 365nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
370{ 366{
371 struct drm_nouveau_grobj_alloc *init = data; 367 struct drm_nouveau_grobj_alloc *init = data;
368 struct {
369 struct nvif_ioctl_v0 ioctl;
370 struct nvif_ioctl_new_v0 new;
371 } args = {
372 .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
373 .ioctl.type = NVIF_IOCTL_V0_NEW,
374 .ioctl.path_nr = 3,
375 .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
376 .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
377 .ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
378 .new.route = NVDRM_OBJECT_ABI16,
379 .new.handle = init->handle,
380 .new.oclass = init->class,
381 };
372 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 382 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
373 struct nouveau_drm *drm = nouveau_drm(dev); 383 struct nouveau_drm *drm = nouveau_drm(dev);
374 struct nouveau_object *object; 384 struct nvif_client *client;
375 int ret; 385 int ret;
376 386
377 if (unlikely(!abi16)) 387 if (unlikely(!abi16))
@@ -379,6 +389,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
379 389
380 if (init->handle == ~0) 390 if (init->handle == ~0)
381 return nouveau_abi16_put(abi16, -EINVAL); 391 return nouveau_abi16_put(abi16, -EINVAL);
392 client = nvif_client(nvif_object(&abi16->device));
382 393
383 /* compatibility with userspace that assumes 506e for all chipsets */ 394 /* compatibility with userspace that assumes 506e for all chipsets */
384 if (init->class == 0x506e) { 395 if (init->class == 0x506e) {
@@ -387,8 +398,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
387 return nouveau_abi16_put(abi16, 0); 398 return nouveau_abi16_put(abi16, 0);
388 } 399 }
389 400
390 ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel, 401 ret = nvif_client_ioctl(client, &args, sizeof(args));
391 init->handle, init->class, NULL, 0, &object);
392 return nouveau_abi16_put(abi16, ret); 402 return nouveau_abi16_put(abi16, ret);
393} 403}
394 404
@@ -396,29 +406,38 @@ int
396nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) 406nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
397{ 407{
398 struct drm_nouveau_notifierobj_alloc *info = data; 408 struct drm_nouveau_notifierobj_alloc *info = data;
409 struct {
410 struct nvif_ioctl_v0 ioctl;
411 struct nvif_ioctl_new_v0 new;
412 struct nv_dma_v0 ctxdma;
413 } args = {
414 .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
415 .ioctl.type = NVIF_IOCTL_V0_NEW,
416 .ioctl.path_nr = 3,
417 .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
418 .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
419 .ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
420 .new.route = NVDRM_OBJECT_ABI16,
421 .new.handle = info->handle,
422 .new.oclass = NV_DMA_IN_MEMORY,
423 };
399 struct nouveau_drm *drm = nouveau_drm(dev); 424 struct nouveau_drm *drm = nouveau_drm(dev);
400 struct nouveau_device *device = nv_device(drm->device);
401 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 425 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
402 struct nouveau_abi16_chan *chan = NULL, *temp; 426 struct nouveau_abi16_chan *chan;
403 struct nouveau_abi16_ntfy *ntfy; 427 struct nouveau_abi16_ntfy *ntfy;
404 struct nouveau_object *object; 428 struct nvif_device *device = &abi16->device;
405 struct nv_dma_class args = {}; 429 struct nvif_client *client;
406 int ret; 430 int ret;
407 431
408 if (unlikely(!abi16)) 432 if (unlikely(!abi16))
409 return -ENOMEM; 433 return -ENOMEM;
410 434
411 /* completely unnecessary for these chipsets... */ 435 /* completely unnecessary for these chipsets... */
412 if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) 436 if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
413 return nouveau_abi16_put(abi16, -EINVAL); 437 return nouveau_abi16_put(abi16, -EINVAL);
438 client = nvif_client(nvif_object(&abi16->device));
414 439
415 list_for_each_entry(temp, &abi16->channels, head) { 440 chan = nouveau_abi16_chan(abi16, info->channel);
416 if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
417 chan = temp;
418 break;
419 }
420 }
421
422 if (!chan) 441 if (!chan)
423 return nouveau_abi16_put(abi16, -ENOENT); 442 return nouveau_abi16_put(abi16, -ENOENT);
424 443
@@ -434,26 +453,29 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
434 if (ret) 453 if (ret)
435 goto done; 454 goto done;
436 455
437 args.start = ntfy->node->offset; 456 args.ctxdma.start = ntfy->node->offset;
438 args.limit = ntfy->node->offset + ntfy->node->length - 1; 457 args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
439 if (device->card_type >= NV_50) { 458 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
440 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; 459 args.ctxdma.target = NV_DMA_V0_TARGET_VM;
441 args.start += chan->ntfy_vma.offset; 460 args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
442 args.limit += chan->ntfy_vma.offset; 461 args.ctxdma.start += chan->ntfy_vma.offset;
462 args.ctxdma.limit += chan->ntfy_vma.offset;
443 } else 463 } else
444 if (drm->agp.stat == ENABLED) { 464 if (drm->agp.stat == ENABLED) {
445 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; 465 args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
446 args.start += drm->agp.base + chan->ntfy->bo.offset; 466 args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
447 args.limit += drm->agp.base + chan->ntfy->bo.offset; 467 args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
468 args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
469 client->super = true;
448 } else { 470 } else {
449 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; 471 args.ctxdma.target = NV_DMA_V0_TARGET_VM;
450 args.start += chan->ntfy->bo.offset; 472 args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
451 args.limit += chan->ntfy->bo.offset; 473 args.ctxdma.start += chan->ntfy->bo.offset;
474 args.ctxdma.limit += chan->ntfy->bo.offset;
452 } 475 }
453 476
454 ret = nouveau_object_new(abi16->client, chan->chan->handle, 477 ret = nvif_client_ioctl(client, &args, sizeof(args));
455 ntfy->handle, 0x003d, &args, 478 client->super = false;
456 sizeof(args), &object);
457 if (ret) 479 if (ret)
458 goto done; 480 goto done;
459 481
@@ -469,28 +491,36 @@ int
469nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) 491nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
470{ 492{
471 struct drm_nouveau_gpuobj_free *fini = data; 493 struct drm_nouveau_gpuobj_free *fini = data;
494 struct {
495 struct nvif_ioctl_v0 ioctl;
496 struct nvif_ioctl_del del;
497 } args = {
498 .ioctl.owner = NVDRM_OBJECT_ABI16,
499 .ioctl.type = NVIF_IOCTL_V0_DEL,
500 .ioctl.path_nr = 4,
501 .ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
502 .ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
503 .ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
504 .ioctl.path[0] = fini->handle,
505 };
472 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 506 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
473 struct nouveau_abi16_chan *chan = NULL, *temp; 507 struct nouveau_abi16_chan *chan;
474 struct nouveau_abi16_ntfy *ntfy; 508 struct nouveau_abi16_ntfy *ntfy;
509 struct nvif_client *client;
475 int ret; 510 int ret;
476 511
477 if (unlikely(!abi16)) 512 if (unlikely(!abi16))
478 return -ENOMEM; 513 return -ENOMEM;
479 514
480 list_for_each_entry(temp, &abi16->channels, head) { 515 chan = nouveau_abi16_chan(abi16, fini->channel);
481 if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
482 chan = temp;
483 break;
484 }
485 }
486
487 if (!chan) 516 if (!chan)
488 return nouveau_abi16_put(abi16, -ENOENT); 517 return nouveau_abi16_put(abi16, -ENOENT);
518 client = nvif_client(nvif_object(&abi16->device));
489 519
490 /* synchronize with the user channel and destroy the gpu object */ 520 /* synchronize with the user channel and destroy the gpu object */
491 nouveau_channel_idle(chan->chan); 521 nouveau_channel_idle(chan->chan);
492 522
493 ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle); 523 ret = nvif_client_ioctl(client, &args, sizeof(args));
494 if (ret) 524 if (ret)
495 return nouveau_abi16_put(abi16, ret); 525 return nouveau_abi16_put(abi16, ret);
496 526
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 90004081a501..39844e6bfbff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -28,8 +28,7 @@ struct nouveau_abi16_chan {
28}; 28};
29 29
30struct nouveau_abi16 { 30struct nouveau_abi16 {
31 struct nouveau_object *client; 31 struct nvif_device device;
32 struct nouveau_object *device;
33 struct list_head channels; 32 struct list_head channels;
34 u64 handles; 33 u64 handles;
35}; 34};
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 51666daddb94..1f6f6ba6847a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -1,7 +1,5 @@
1#include <linux/module.h> 1#include <linux/module.h>
2 2
3#include <core/device.h>
4
5#include "nouveau_drm.h" 3#include "nouveau_drm.h"
6#include "nouveau_agp.h" 4#include "nouveau_agp.h"
7#include "nouveau_reg.h" 5#include "nouveau_reg.h"
@@ -29,7 +27,7 @@ static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
29static unsigned long 27static unsigned long
30get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info) 28get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
31{ 29{
32 struct nouveau_device *device = nv_device(drm->device); 30 struct nvif_device *device = &drm->device;
33 struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list; 31 struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
34 int agpmode = nouveau_agpmode; 32 int agpmode = nouveau_agpmode;
35 unsigned long mode = info->mode; 33 unsigned long mode = info->mode;
@@ -38,7 +36,7 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
38 * FW seems to be broken on nv18, it makes the card lock up 36 * FW seems to be broken on nv18, it makes the card lock up
39 * randomly. 37 * randomly.
40 */ 38 */
41 if (device->chipset == 0x18) 39 if (device->info.chipset == 0x18)
42 mode &= ~PCI_AGP_COMMAND_FW; 40 mode &= ~PCI_AGP_COMMAND_FW;
43 41
44 /* 42 /*
@@ -47,10 +45,10 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
47 while (agpmode == -1 && quirk->hostbridge_vendor) { 45 while (agpmode == -1 && quirk->hostbridge_vendor) {
48 if (info->id_vendor == quirk->hostbridge_vendor && 46 if (info->id_vendor == quirk->hostbridge_vendor &&
49 info->id_device == quirk->hostbridge_device && 47 info->id_device == quirk->hostbridge_device &&
50 device->pdev->vendor == quirk->chip_vendor && 48 nvkm_device(device)->pdev->vendor == quirk->chip_vendor &&
51 device->pdev->device == quirk->chip_device) { 49 nvkm_device(device)->pdev->device == quirk->chip_device) {
52 agpmode = quirk->mode; 50 agpmode = quirk->mode;
53 nv_info(device, "Forcing agp mode to %dX. Use agpmode to override.\n", 51 NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
54 agpmode); 52 agpmode);
55 break; 53 break;
56 } 54 }
@@ -104,7 +102,7 @@ void
104nouveau_agp_reset(struct nouveau_drm *drm) 102nouveau_agp_reset(struct nouveau_drm *drm)
105{ 103{
106#if __OS_HAS_AGP 104#if __OS_HAS_AGP
107 struct nouveau_device *device = nv_device(drm->device); 105 struct nvif_device *device = &drm->device;
108 struct drm_device *dev = drm->dev; 106 struct drm_device *dev = drm->dev;
109 u32 save[2]; 107 u32 save[2];
110 int ret; 108 int ret;
@@ -115,7 +113,7 @@ nouveau_agp_reset(struct nouveau_drm *drm)
115 /* First of all, disable fast writes, otherwise if it's 113 /* First of all, disable fast writes, otherwise if it's
116 * already enabled in the AGP bridge and we disable the card's 114 * already enabled in the AGP bridge and we disable the card's
117 * AGP controller we might be locking ourselves out of it. */ 115 * AGP controller we might be locking ourselves out of it. */
118 if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) | 116 if ((nvif_rd32(device, NV04_PBUS_PCI_NV_19) |
119 dev->agp->mode) & PCI_AGP_COMMAND_FW) { 117 dev->agp->mode) & PCI_AGP_COMMAND_FW) {
120 struct drm_agp_info info; 118 struct drm_agp_info info;
121 struct drm_agp_mode mode; 119 struct drm_agp_mode mode;
@@ -134,15 +132,15 @@ nouveau_agp_reset(struct nouveau_drm *drm)
134 132
135 133
136 /* clear busmaster bit, and disable AGP */ 134 /* clear busmaster bit, and disable AGP */
137 save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000); 135 save[0] = nvif_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
138 nv_wr32(device, NV04_PBUS_PCI_NV_19, 0); 136 nvif_wr32(device, NV04_PBUS_PCI_NV_19, 0);
139 137
140 /* reset PGRAPH, PFIFO and PTIMER */ 138 /* reset PGRAPH, PFIFO and PTIMER */
141 save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000); 139 save[1] = nvif_mask(device, 0x000200, 0x00011100, 0x00000000);
142 nv_mask(device, 0x000200, 0x00011100, save[1]); 140 nvif_mask(device, 0x000200, 0x00011100, save[1]);
143 141
144 /* and restore bustmaster bit (gives effect of resetting AGP) */ 142 /* and restore bustmaster bit (gives effect of resetting AGP) */
145 nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]); 143 nvif_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
146#endif 144#endif
147} 145}
148 146
@@ -150,7 +148,6 @@ void
150nouveau_agp_init(struct nouveau_drm *drm) 148nouveau_agp_init(struct nouveau_drm *drm)
151{ 149{
152#if __OS_HAS_AGP 150#if __OS_HAS_AGP
153 struct nouveau_device *device = nv_device(drm->device);
154 struct drm_device *dev = drm->dev; 151 struct drm_device *dev = drm->dev;
155 struct drm_agp_info info; 152 struct drm_agp_info info;
156 struct drm_agp_mode mode; 153 struct drm_agp_mode mode;
@@ -162,13 +159,13 @@ nouveau_agp_init(struct nouveau_drm *drm)
162 159
163 ret = drm_agp_acquire(dev); 160 ret = drm_agp_acquire(dev);
164 if (ret) { 161 if (ret) {
165 nv_error(device, "unable to acquire AGP: %d\n", ret); 162 NV_ERROR(drm, "unable to acquire AGP: %d\n", ret);
166 return; 163 return;
167 } 164 }
168 165
169 ret = drm_agp_info(dev, &info); 166 ret = drm_agp_info(dev, &info);
170 if (ret) { 167 if (ret) {
171 nv_error(device, "unable to get AGP info: %d\n", ret); 168 NV_ERROR(drm, "unable to get AGP info: %d\n", ret);
172 return; 169 return;
173 } 170 }
174 171
@@ -177,7 +174,7 @@ nouveau_agp_init(struct nouveau_drm *drm)
177 174
178 ret = drm_agp_enable(dev, mode); 175 ret = drm_agp_enable(dev, mode);
179 if (ret) { 176 if (ret) {
180 nv_error(device, "unable to enable AGP: %d\n", ret); 177 NV_ERROR(drm, "unable to enable AGP: %d\n", ret);
181 return; 178 return;
182 } 179 }
183 180
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2c1e4aad7da3..e566c5b53651 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -40,8 +40,8 @@ static int
40nv40_get_intensity(struct backlight_device *bd) 40nv40_get_intensity(struct backlight_device *bd)
41{ 41{
42 struct nouveau_drm *drm = bl_get_data(bd); 42 struct nouveau_drm *drm = bl_get_data(bd);
43 struct nouveau_device *device = nv_device(drm->device); 43 struct nvif_device *device = &drm->device;
44 int val = (nv_rd32(device, NV40_PMC_BACKLIGHT) & 44 int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
45 NV40_PMC_BACKLIGHT_MASK) >> 16; 45 NV40_PMC_BACKLIGHT_MASK) >> 16;
46 46
47 return val; 47 return val;
@@ -51,11 +51,11 @@ static int
51nv40_set_intensity(struct backlight_device *bd) 51nv40_set_intensity(struct backlight_device *bd)
52{ 52{
53 struct nouveau_drm *drm = bl_get_data(bd); 53 struct nouveau_drm *drm = bl_get_data(bd);
54 struct nouveau_device *device = nv_device(drm->device); 54 struct nvif_device *device = &drm->device;
55 int val = bd->props.brightness; 55 int val = bd->props.brightness;
56 int reg = nv_rd32(device, NV40_PMC_BACKLIGHT); 56 int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
57 57
58 nv_wr32(device, NV40_PMC_BACKLIGHT, 58 nvif_wr32(device, NV40_PMC_BACKLIGHT,
59 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK)); 59 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
60 60
61 return 0; 61 return 0;
@@ -71,11 +71,11 @@ static int
71nv40_backlight_init(struct drm_connector *connector) 71nv40_backlight_init(struct drm_connector *connector)
72{ 72{
73 struct nouveau_drm *drm = nouveau_drm(connector->dev); 73 struct nouveau_drm *drm = nouveau_drm(connector->dev);
74 struct nouveau_device *device = nv_device(drm->device); 74 struct nvif_device *device = &drm->device;
75 struct backlight_properties props; 75 struct backlight_properties props;
76 struct backlight_device *bd; 76 struct backlight_device *bd;
77 77
78 if (!(nv_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) 78 if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
79 return 0; 79 return 0;
80 80
81 memset(&props, 0, sizeof(struct backlight_properties)); 81 memset(&props, 0, sizeof(struct backlight_properties));
@@ -97,12 +97,12 @@ nv50_get_intensity(struct backlight_device *bd)
97{ 97{
98 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 98 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
99 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 99 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
100 struct nouveau_device *device = nv_device(drm->device); 100 struct nvif_device *device = &drm->device;
101 int or = nv_encoder->or; 101 int or = nv_encoder->or;
102 u32 div = 1025; 102 u32 div = 1025;
103 u32 val; 103 u32 val;
104 104
105 val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); 105 val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
106 val &= NV50_PDISP_SOR_PWM_CTL_VAL; 106 val &= NV50_PDISP_SOR_PWM_CTL_VAL;
107 return ((val * 100) + (div / 2)) / div; 107 return ((val * 100) + (div / 2)) / div;
108} 108}
@@ -112,12 +112,12 @@ nv50_set_intensity(struct backlight_device *bd)
112{ 112{
113 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 113 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
114 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 114 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
115 struct nouveau_device *device = nv_device(drm->device); 115 struct nvif_device *device = &drm->device;
116 int or = nv_encoder->or; 116 int or = nv_encoder->or;
117 u32 div = 1025; 117 u32 div = 1025;
118 u32 val = (bd->props.brightness * div) / 100; 118 u32 val = (bd->props.brightness * div) / 100;
119 119
120 nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), 120 nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
121 NV50_PDISP_SOR_PWM_CTL_NEW | val); 121 NV50_PDISP_SOR_PWM_CTL_NEW | val);
122 return 0; 122 return 0;
123} 123}
@@ -133,12 +133,12 @@ nva3_get_intensity(struct backlight_device *bd)
133{ 133{
134 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 134 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
135 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 135 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
136 struct nouveau_device *device = nv_device(drm->device); 136 struct nvif_device *device = &drm->device;
137 int or = nv_encoder->or; 137 int or = nv_encoder->or;
138 u32 div, val; 138 u32 div, val;
139 139
140 div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); 140 div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
141 val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); 141 val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
142 val &= NVA3_PDISP_SOR_PWM_CTL_VAL; 142 val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
143 if (div && div >= val) 143 if (div && div >= val)
144 return ((val * 100) + (div / 2)) / div; 144 return ((val * 100) + (div / 2)) / div;
@@ -151,14 +151,14 @@ nva3_set_intensity(struct backlight_device *bd)
151{ 151{
152 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 152 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
153 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 153 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
154 struct nouveau_device *device = nv_device(drm->device); 154 struct nvif_device *device = &drm->device;
155 int or = nv_encoder->or; 155 int or = nv_encoder->or;
156 u32 div, val; 156 u32 div, val;
157 157
158 div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); 158 div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
159 val = (bd->props.brightness * div) / 100; 159 val = (bd->props.brightness * div) / 100;
160 if (div) { 160 if (div) {
161 nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val | 161 nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val |
162 NV50_PDISP_SOR_PWM_CTL_NEW | 162 NV50_PDISP_SOR_PWM_CTL_NEW |
163 NVA3_PDISP_SOR_PWM_CTL_UNK); 163 NVA3_PDISP_SOR_PWM_CTL_UNK);
164 return 0; 164 return 0;
@@ -177,7 +177,7 @@ static int
177nv50_backlight_init(struct drm_connector *connector) 177nv50_backlight_init(struct drm_connector *connector)
178{ 178{
179 struct nouveau_drm *drm = nouveau_drm(connector->dev); 179 struct nouveau_drm *drm = nouveau_drm(connector->dev);
180 struct nouveau_device *device = nv_device(drm->device); 180 struct nvif_device *device = &drm->device;
181 struct nouveau_encoder *nv_encoder; 181 struct nouveau_encoder *nv_encoder;
182 struct backlight_properties props; 182 struct backlight_properties props;
183 struct backlight_device *bd; 183 struct backlight_device *bd;
@@ -190,12 +190,12 @@ nv50_backlight_init(struct drm_connector *connector)
190 return -ENODEV; 190 return -ENODEV;
191 } 191 }
192 192
193 if (!nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) 193 if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
194 return 0; 194 return 0;
195 195
196 if (device->chipset <= 0xa0 || 196 if (device->info.chipset <= 0xa0 ||
197 device->chipset == 0xaa || 197 device->info.chipset == 0xaa ||
198 device->chipset == 0xac) 198 device->info.chipset == 0xac)
199 ops = &nv50_bl_ops; 199 ops = &nv50_bl_ops;
200 else 200 else
201 ops = &nva3_bl_ops; 201 ops = &nva3_bl_ops;
@@ -218,7 +218,7 @@ int
218nouveau_backlight_init(struct drm_device *dev) 218nouveau_backlight_init(struct drm_device *dev)
219{ 219{
220 struct nouveau_drm *drm = nouveau_drm(dev); 220 struct nouveau_drm *drm = nouveau_drm(dev);
221 struct nouveau_device *device = nv_device(drm->device); 221 struct nvif_device *device = &drm->device;
222 struct drm_connector *connector; 222 struct drm_connector *connector;
223 223
224 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 224 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -226,13 +226,12 @@ nouveau_backlight_init(struct drm_device *dev)
226 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 226 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
227 continue; 227 continue;
228 228
229 switch (device->card_type) { 229 switch (device->info.family) {
230 case NV_40: 230 case NV_DEVICE_INFO_V0_CURIE:
231 return nv40_backlight_init(connector); 231 return nv40_backlight_init(connector);
232 case NV_50: 232 case NV_DEVICE_INFO_V0_TESLA:
233 case NV_C0: 233 case NV_DEVICE_INFO_V0_FERMI:
234 case NV_D0: 234 case NV_DEVICE_INFO_V0_KEPLER:
235 case NV_E0:
236 return nv50_backlight_init(connector); 235 return nv50_backlight_init(connector);
237 default: 236 default:
238 break; 237 break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8268a4ccac15..dae2c96deef8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -22,8 +22,6 @@
22 * SOFTWARE. 22 * SOFTWARE.
23 */ 23 */
24 24
25#include <subdev/bios.h>
26
27#include <drm/drmP.h> 25#include <drm/drmP.h>
28 26
29#include "nouveau_drm.h" 27#include "nouveau_drm.h"
@@ -217,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
217 */ 215 */
218 216
219 struct nouveau_drm *drm = nouveau_drm(dev); 217 struct nouveau_drm *drm = nouveau_drm(dev);
220 struct nouveau_device *device = nv_device(drm->device); 218 struct nvif_device *device = &drm->device;
221 struct nvbios *bios = &drm->vbios; 219 struct nvbios *bios = &drm->vbios;
222 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; 220 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
223 uint32_t sel_clk_binding, sel_clk; 221 uint32_t sel_clk_binding, sel_clk;
@@ -240,7 +238,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
240 NV_INFO(drm, "Calling LVDS script %d:\n", script); 238 NV_INFO(drm, "Calling LVDS script %d:\n", script);
241 239
242 /* don't let script change pll->head binding */ 240 /* don't let script change pll->head binding */
243 sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; 241 sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
244 242
245 if (lvds_ver < 0x30) 243 if (lvds_ver < 0x30)
246 ret = call_lvds_manufacturer_script(dev, dcbent, head, script); 244 ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
@@ -252,7 +250,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
252 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; 250 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
253 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); 251 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
254 /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */ 252 /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
255 nv_wr32(device, NV_PBUS_POWERCTRL_2, 0); 253 nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
256 254
257 return ret; 255 return ret;
258} 256}
@@ -320,7 +318,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
320static int 318static int
321get_fp_strap(struct drm_device *dev, struct nvbios *bios) 319get_fp_strap(struct drm_device *dev, struct nvbios *bios)
322{ 320{
323 struct nouveau_device *device = nouveau_dev(dev); 321 struct nvif_device *device = &nouveau_drm(dev)->device;
324 322
325 /* 323 /*
326 * The fp strap is normally dictated by the "User Strap" in 324 * The fp strap is normally dictated by the "User Strap" in
@@ -334,10 +332,10 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
334 if (bios->major_version < 5 && bios->data[0x48] & 0x4) 332 if (bios->major_version < 5 && bios->data[0x48] & 0x4)
335 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; 333 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
336 334
337 if (device->card_type >= NV_50) 335 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
338 return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; 336 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
339 else 337 else
340 return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; 338 return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
341} 339}
342 340
343static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) 341static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
@@ -636,7 +634,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
636 */ 634 */
637 635
638 struct nouveau_drm *drm = nouveau_drm(dev); 636 struct nouveau_drm *drm = nouveau_drm(dev);
639 struct nouveau_device *device = nv_device(drm->device); 637 struct nvif_device *device = &drm->device;
640 struct nvbios *bios = &drm->vbios; 638 struct nvbios *bios = &drm->vbios;
641 int cv = bios->chip_version; 639 int cv = bios->chip_version;
642 uint16_t clktable = 0, scriptptr; 640 uint16_t clktable = 0, scriptptr;
@@ -670,7 +668,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
670 } 668 }
671 669
672 /* don't let script change pll->head binding */ 670 /* don't let script change pll->head binding */
673 sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; 671 sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
674 run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000); 672 run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
675 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; 673 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
676 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); 674 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
@@ -1253,7 +1251,7 @@ olddcb_table(struct drm_device *dev)
1253 struct nouveau_drm *drm = nouveau_drm(dev); 1251 struct nouveau_drm *drm = nouveau_drm(dev);
1254 u8 *dcb = NULL; 1252 u8 *dcb = NULL;
1255 1253
1256 if (nv_device(drm->device)->card_type > NV_04) 1254 if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT)
1257 dcb = ROMPTR(dev, drm->vbios.data[0x36]); 1255 dcb = ROMPTR(dev, drm->vbios.data[0x36]);
1258 if (!dcb) { 1256 if (!dcb) {
1259 NV_WARN(drm, "No DCB data found in VBIOS\n"); 1257 NV_WARN(drm, "No DCB data found in VBIOS\n");
@@ -1399,6 +1397,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
1399 uint32_t conn, uint32_t conf, struct dcb_output *entry) 1397 uint32_t conn, uint32_t conf, struct dcb_output *entry)
1400{ 1398{
1401 struct nouveau_drm *drm = nouveau_drm(dev); 1399 struct nouveau_drm *drm = nouveau_drm(dev);
1400 int link = 0;
1402 1401
1403 entry->type = conn & 0xf; 1402 entry->type = conn & 0xf;
1404 entry->i2c_index = (conn >> 4) & 0xf; 1403 entry->i2c_index = (conn >> 4) & 0xf;
@@ -1444,6 +1443,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
1444 if (conf & 0x4) 1443 if (conf & 0x4)
1445 entry->lvdsconf.use_power_scripts = true; 1444 entry->lvdsconf.use_power_scripts = true;
1446 entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4; 1445 entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4;
1446 link = entry->lvdsconf.sor.link;
1447 } 1447 }
1448 if (conf & mask) { 1448 if (conf & mask) {
1449 /* 1449 /*
@@ -1492,17 +1492,18 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
1492 entry->dpconf.link_nr = 1; 1492 entry->dpconf.link_nr = 1;
1493 break; 1493 break;
1494 } 1494 }
1495 link = entry->dpconf.sor.link;
1495 break; 1496 break;
1496 case DCB_OUTPUT_TMDS: 1497 case DCB_OUTPUT_TMDS:
1497 if (dcb->version >= 0x40) { 1498 if (dcb->version >= 0x40) {
1498 entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4; 1499 entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
1499 entry->extdev = (conf & 0x0000ff00) >> 8; 1500 entry->extdev = (conf & 0x0000ff00) >> 8;
1501 link = entry->tmdsconf.sor.link;
1500 } 1502 }
1501 else if (dcb->version >= 0x30) 1503 else if (dcb->version >= 0x30)
1502 entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8; 1504 entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
1503 else if (dcb->version >= 0x22) 1505 else if (dcb->version >= 0x22)
1504 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; 1506 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
1505
1506 break; 1507 break;
1507 case DCB_OUTPUT_EOL: 1508 case DCB_OUTPUT_EOL:
1508 /* weird g80 mobile type that "nv" treats as a terminator */ 1509 /* weird g80 mobile type that "nv" treats as a terminator */
@@ -1526,6 +1527,8 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
1526 if (conf & 0x100000) 1527 if (conf & 0x100000)
1527 entry->i2c_upper_default = true; 1528 entry->i2c_upper_default = true;
1528 1529
1530 entry->hasht = (entry->location << 4) | entry->type;
1531 entry->hashm = (entry->heads << 8) | (link << 6) | entry->or;
1529 return true; 1532 return true;
1530} 1533}
1531 1534
@@ -1908,7 +1911,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
1908 */ 1911 */
1909 1912
1910 struct nouveau_drm *drm = nouveau_drm(dev); 1913 struct nouveau_drm *drm = nouveau_drm(dev);
1911 struct nouveau_device *device = nv_device(drm->device); 1914 struct nvif_device *device = &drm->device;
1912 uint8_t bytes_to_write; 1915 uint8_t bytes_to_write;
1913 uint16_t hwsq_entry_offset; 1916 uint16_t hwsq_entry_offset;
1914 int i; 1917 int i;
@@ -1931,15 +1934,15 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
1931 hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write; 1934 hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
1932 1935
1933 /* set sequencer control */ 1936 /* set sequencer control */
1934 nv_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset])); 1937 nvif_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
1935 bytes_to_write -= 4; 1938 bytes_to_write -= 4;
1936 1939
1937 /* write ucode */ 1940 /* write ucode */
1938 for (i = 0; i < bytes_to_write; i += 4) 1941 for (i = 0; i < bytes_to_write; i += 4)
1939 nv_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4])); 1942 nvif_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
1940 1943
1941 /* twiddle NV_PBUS_DEBUG_4 */ 1944 /* twiddle NV_PBUS_DEBUG_4 */
1942 nv_wr32(device, NV_PBUS_DEBUG_4, nv_rd32(device, NV_PBUS_DEBUG_4) | 0x18); 1945 nvif_wr32(device, NV_PBUS_DEBUG_4, nvif_rd32(device, NV_PBUS_DEBUG_4) | 0x18);
1943 1946
1944 return 0; 1947 return 0;
1945} 1948}
@@ -2002,7 +2005,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
2002static bool NVInitVBIOS(struct drm_device *dev) 2005static bool NVInitVBIOS(struct drm_device *dev)
2003{ 2006{
2004 struct nouveau_drm *drm = nouveau_drm(dev); 2007 struct nouveau_drm *drm = nouveau_drm(dev);
2005 struct nouveau_bios *bios = nouveau_bios(drm->device); 2008 struct nouveau_bios *bios = nvkm_bios(&drm->device);
2006 struct nvbios *legacy = &drm->vbios; 2009 struct nvbios *legacy = &drm->vbios;
2007 2010
2008 memset(legacy, 0, sizeof(struct nvbios)); 2011 memset(legacy, 0, sizeof(struct nvbios));
@@ -2054,7 +2057,7 @@ nouveau_bios_posted(struct drm_device *dev)
2054 struct nouveau_drm *drm = nouveau_drm(dev); 2057 struct nouveau_drm *drm = nouveau_drm(dev);
2055 unsigned htotal; 2058 unsigned htotal;
2056 2059
2057 if (nv_device(drm->device)->card_type >= NV_50) 2060 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
2058 return true; 2061 return true;
2059 2062
2060 htotal = NVReadVgaCrtc(dev, 0, 0x06); 2063 htotal = NVReadVgaCrtc(dev, 0, 0x06);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ba29a701ca1d..da5d631aa5b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,13 +27,9 @@
27 * Jeremy Kolb <jkolb@brandeis.edu> 27 * Jeremy Kolb <jkolb@brandeis.edu>
28 */ 28 */
29 29
30#include <core/engine.h> 30#include <linux/dma-mapping.h>
31#include <linux/swiotlb.h> 31#include <linux/swiotlb.h>
32 32
33#include <subdev/fb.h>
34#include <subdev/vm.h>
35#include <subdev/bar.h>
36
37#include "nouveau_drm.h" 33#include "nouveau_drm.h"
38#include "nouveau_dma.h" 34#include "nouveau_dma.h"
39#include "nouveau_fence.h" 35#include "nouveau_fence.h"
@@ -52,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
52{ 48{
53 struct nouveau_drm *drm = nouveau_drm(dev); 49 struct nouveau_drm *drm = nouveau_drm(dev);
54 int i = reg - drm->tile.reg; 50 int i = reg - drm->tile.reg;
55 struct nouveau_fb *pfb = nouveau_fb(drm->device); 51 struct nouveau_fb *pfb = nvkm_fb(&drm->device);
56 struct nouveau_fb_tile *tile = &pfb->tile.region[i]; 52 struct nouveau_fb_tile *tile = &pfb->tile.region[i];
57 struct nouveau_engine *engine; 53 struct nouveau_engine *engine;
58 54
@@ -109,7 +105,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
109 u32 size, u32 pitch, u32 flags) 105 u32 size, u32 pitch, u32 flags)
110{ 106{
111 struct nouveau_drm *drm = nouveau_drm(dev); 107 struct nouveau_drm *drm = nouveau_drm(dev);
112 struct nouveau_fb *pfb = nouveau_fb(drm->device); 108 struct nouveau_fb *pfb = nvkm_fb(&drm->device);
113 struct nouveau_drm_tile *tile, *found = NULL; 109 struct nouveau_drm_tile *tile, *found = NULL;
114 int i; 110 int i;
115 111
@@ -153,23 +149,23 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
153 int *align, int *size) 149 int *align, int *size)
154{ 150{
155 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 151 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
156 struct nouveau_device *device = nv_device(drm->device); 152 struct nvif_device *device = &drm->device;
157 153
158 if (device->card_type < NV_50) { 154 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
159 if (nvbo->tile_mode) { 155 if (nvbo->tile_mode) {
160 if (device->chipset >= 0x40) { 156 if (device->info.chipset >= 0x40) {
161 *align = 65536; 157 *align = 65536;
162 *size = roundup(*size, 64 * nvbo->tile_mode); 158 *size = roundup(*size, 64 * nvbo->tile_mode);
163 159
164 } else if (device->chipset >= 0x30) { 160 } else if (device->info.chipset >= 0x30) {
165 *align = 32768; 161 *align = 32768;
166 *size = roundup(*size, 64 * nvbo->tile_mode); 162 *size = roundup(*size, 64 * nvbo->tile_mode);
167 163
168 } else if (device->chipset >= 0x20) { 164 } else if (device->info.chipset >= 0x20) {
169 *align = 16384; 165 *align = 16384;
170 *size = roundup(*size, 64 * nvbo->tile_mode); 166 *size = roundup(*size, 64 * nvbo->tile_mode);
171 167
172 } else if (device->chipset >= 0x10) { 168 } else if (device->info.chipset >= 0x10) {
173 *align = 16384; 169 *align = 16384;
174 *size = roundup(*size, 32 * nvbo->tile_mode); 170 *size = roundup(*size, 32 * nvbo->tile_mode);
175 } 171 }
@@ -196,12 +192,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
196 int lpg_shift = 12; 192 int lpg_shift = 12;
197 int max_size; 193 int max_size;
198 194
199 if (drm->client.base.vm) 195 if (drm->client.vm)
200 lpg_shift = drm->client.base.vm->vmm->lpg_shift; 196 lpg_shift = drm->client.vm->vmm->lpg_shift;
201 max_size = INT_MAX & ~((1 << lpg_shift) - 1); 197 max_size = INT_MAX & ~((1 << lpg_shift) - 1);
202 198
203 if (size <= 0 || size > max_size) { 199 if (size <= 0 || size > max_size) {
204 nv_warn(drm, "skipped size %x\n", (u32)size); 200 NV_WARN(drm, "skipped size %x\n", (u32)size);
205 return -EINVAL; 201 return -EINVAL;
206 } 202 }
207 203
@@ -219,9 +215,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
219 nvbo->bo.bdev = &drm->ttm.bdev; 215 nvbo->bo.bdev = &drm->ttm.bdev;
220 216
221 nvbo->page_shift = 12; 217 nvbo->page_shift = 12;
222 if (drm->client.base.vm) { 218 if (drm->client.vm) {
223 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) 219 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
224 nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift; 220 nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
225 } 221 }
226 222
227 nouveau_bo_fixup_align(nvbo, flags, &align, &size); 223 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -261,11 +257,9 @@ static void
261set_placement_range(struct nouveau_bo *nvbo, uint32_t type) 257set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
262{ 258{
263 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 259 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
264 struct nouveau_fb *pfb = nouveau_fb(drm->device); 260 u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
265 u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
266 261
267 if ((nv_device(drm->device)->card_type == NV_10 || 262 if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
268 nv_device(drm->device)->card_type == NV_11) &&
269 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 263 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
270 nvbo->bo.mem.num_pages < vram_pages / 4) { 264 nvbo->bo.mem.num_pages < vram_pages / 4) {
271 /* 265 /*
@@ -500,21 +494,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
500 man->default_caching = TTM_PL_FLAG_CACHED; 494 man->default_caching = TTM_PL_FLAG_CACHED;
501 break; 495 break;
502 case TTM_PL_VRAM: 496 case TTM_PL_VRAM:
503 if (nv_device(drm->device)->card_type >= NV_50) { 497 man->flags = TTM_MEMTYPE_FLAG_FIXED |
498 TTM_MEMTYPE_FLAG_MAPPABLE;
499 man->available_caching = TTM_PL_FLAG_UNCACHED |
500 TTM_PL_FLAG_WC;
501 man->default_caching = TTM_PL_FLAG_WC;
502
503 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
504 /* Some BARs do not support being ioremapped WC */
505 if (nvkm_bar(&drm->device)->iomap_uncached) {
506 man->available_caching = TTM_PL_FLAG_UNCACHED;
507 man->default_caching = TTM_PL_FLAG_UNCACHED;
508 }
509
504 man->func = &nouveau_vram_manager; 510 man->func = &nouveau_vram_manager;
505 man->io_reserve_fastpath = false; 511 man->io_reserve_fastpath = false;
506 man->use_io_reserve_lru = true; 512 man->use_io_reserve_lru = true;
507 } else { 513 } else {
508 man->func = &ttm_bo_manager_func; 514 man->func = &ttm_bo_manager_func;
509 } 515 }
510 man->flags = TTM_MEMTYPE_FLAG_FIXED |
511 TTM_MEMTYPE_FLAG_MAPPABLE;
512 man->available_caching = TTM_PL_FLAG_UNCACHED |
513 TTM_PL_FLAG_WC;
514 man->default_caching = TTM_PL_FLAG_WC;
515 break; 516 break;
516 case TTM_PL_TT: 517 case TTM_PL_TT:
517 if (nv_device(drm->device)->card_type >= NV_50) 518 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
518 man->func = &nouveau_gart_manager; 519 man->func = &nouveau_gart_manager;
519 else 520 else
520 if (drm->agp.stat != ENABLED) 521 if (drm->agp.stat != ENABLED)
@@ -763,9 +764,9 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
763 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); 764 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
764 OUT_RING (chan, handle); 765 OUT_RING (chan, handle);
765 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); 766 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
766 OUT_RING (chan, NvNotify0); 767 OUT_RING (chan, chan->drm->ntfy.handle);
767 OUT_RING (chan, NvDmaFB); 768 OUT_RING (chan, chan->vram.handle);
768 OUT_RING (chan, NvDmaFB); 769 OUT_RING (chan, chan->vram.handle);
769 } 770 }
770 771
771 return ret; 772 return ret;
@@ -852,7 +853,7 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
852 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); 853 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
853 OUT_RING (chan, handle); 854 OUT_RING (chan, handle);
854 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1); 855 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
855 OUT_RING (chan, NvNotify0); 856 OUT_RING (chan, chan->drm->ntfy.handle);
856 } 857 }
857 858
858 return ret; 859 return ret;
@@ -864,7 +865,7 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
864{ 865{
865 if (mem->mem_type == TTM_PL_TT) 866 if (mem->mem_type == TTM_PL_TT)
866 return NvDmaTT; 867 return NvDmaTT;
867 return NvDmaFB; 868 return chan->vram.handle;
868} 869}
869 870
870static int 871static int
@@ -922,12 +923,12 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
922 u64 size = (u64)mem->num_pages << PAGE_SHIFT; 923 u64 size = (u64)mem->num_pages << PAGE_SHIFT;
923 int ret; 924 int ret;
924 925
925 ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift, 926 ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
926 NV_MEM_ACCESS_RW, &old_node->vma[0]); 927 NV_MEM_ACCESS_RW, &old_node->vma[0]);
927 if (ret) 928 if (ret)
928 return ret; 929 return ret;
929 930
930 ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift, 931 ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
931 NV_MEM_ACCESS_RW, &old_node->vma[1]); 932 NV_MEM_ACCESS_RW, &old_node->vma[1]);
932 if (ret) { 933 if (ret) {
933 nouveau_vm_put(&old_node->vma[0]); 934 nouveau_vm_put(&old_node->vma[0]);
@@ -945,6 +946,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
945{ 946{
946 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 947 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
947 struct nouveau_channel *chan = drm->ttm.chan; 948 struct nouveau_channel *chan = drm->ttm.chan;
949 struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
948 struct nouveau_fence *fence; 950 struct nouveau_fence *fence;
949 int ret; 951 int ret;
950 952
@@ -952,13 +954,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
952 * old nouveau_mem node, these will get cleaned up after ttm has 954 * old nouveau_mem node, these will get cleaned up after ttm has
953 * destroyed the ttm_mem_reg 955 * destroyed the ttm_mem_reg
954 */ 956 */
955 if (nv_device(drm->device)->card_type >= NV_50) { 957 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
956 ret = nouveau_bo_move_prep(drm, bo, new_mem); 958 ret = nouveau_bo_move_prep(drm, bo, new_mem);
957 if (ret) 959 if (ret)
958 return ret; 960 return ret;
959 } 961 }
960 962
961 mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); 963 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
962 ret = nouveau_fence_sync(bo->sync_obj, chan); 964 ret = nouveau_fence_sync(bo->sync_obj, chan);
963 if (ret == 0) { 965 if (ret == 0) {
964 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); 966 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
@@ -973,7 +975,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
973 } 975 }
974 } 976 }
975 } 977 }
976 mutex_unlock(&chan->cli->mutex); 978 mutex_unlock(&cli->mutex);
977 return ret; 979 return ret;
978} 980}
979 981
@@ -1005,9 +1007,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1005 int ret; 1007 int ret;
1006 1008
1007 do { 1009 do {
1008 struct nouveau_object *object;
1009 struct nouveau_channel *chan; 1010 struct nouveau_channel *chan;
1010 u32 handle = (mthd->engine << 16) | mthd->oclass;
1011 1011
1012 if (mthd->engine) 1012 if (mthd->engine)
1013 chan = drm->cechan; 1013 chan = drm->cechan;
@@ -1016,13 +1016,14 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1016 if (chan == NULL) 1016 if (chan == NULL)
1017 continue; 1017 continue;
1018 1018
1019 ret = nouveau_object_new(nv_object(drm), chan->handle, handle, 1019 ret = nvif_object_init(chan->object, NULL,
1020 mthd->oclass, NULL, 0, &object); 1020 mthd->oclass | (mthd->engine << 16),
1021 mthd->oclass, NULL, 0,
1022 &drm->ttm.copy);
1021 if (ret == 0) { 1023 if (ret == 0) {
1022 ret = mthd->init(chan, handle); 1024 ret = mthd->init(chan, drm->ttm.copy.handle);
1023 if (ret) { 1025 if (ret) {
1024 nouveau_object_del(nv_object(drm), 1026 nvif_object_fini(&drm->ttm.copy);
1025 chan->handle, handle);
1026 continue; 1027 continue;
1027 } 1028 }
1028 1029
@@ -1135,7 +1136,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1135 if (new_mem->mem_type != TTM_PL_VRAM) 1136 if (new_mem->mem_type != TTM_PL_VRAM)
1136 return 0; 1137 return 0;
1137 1138
1138 if (nv_device(drm->device)->card_type >= NV_10) { 1139 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1139 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, 1140 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
1140 nvbo->tile_mode, 1141 nvbo->tile_mode,
1141 nvbo->tile_flags); 1142 nvbo->tile_flags);
@@ -1166,7 +1167,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1166 struct nouveau_drm_tile *new_tile = NULL; 1167 struct nouveau_drm_tile *new_tile = NULL;
1167 int ret = 0; 1168 int ret = 0;
1168 1169
1169 if (nv_device(drm->device)->card_type < NV_50) { 1170 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1170 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); 1171 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1171 if (ret) 1172 if (ret)
1172 return ret; 1173 return ret;
@@ -1203,7 +1204,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1203 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 1204 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
1204 1205
1205out: 1206out:
1206 if (nv_device(drm->device)->card_type < NV_50) { 1207 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1207 if (ret) 1208 if (ret)
1208 nouveau_bo_vm_cleanup(bo, NULL, &new_tile); 1209 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1209 else 1210 else
@@ -1249,16 +1250,16 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1249 mem->bus.is_iomem = !dev->agp->cant_use_aperture; 1250 mem->bus.is_iomem = !dev->agp->cant_use_aperture;
1250 } 1251 }
1251#endif 1252#endif
1252 if (nv_device(drm->device)->card_type < NV_50 || !node->memtype) 1253 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
1253 /* untiled */ 1254 /* untiled */
1254 break; 1255 break;
1255 /* fallthrough, tiled memory */ 1256 /* fallthrough, tiled memory */
1256 case TTM_PL_VRAM: 1257 case TTM_PL_VRAM:
1257 mem->bus.offset = mem->start << PAGE_SHIFT; 1258 mem->bus.offset = mem->start << PAGE_SHIFT;
1258 mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1); 1259 mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
1259 mem->bus.is_iomem = true; 1260 mem->bus.is_iomem = true;
1260 if (nv_device(drm->device)->card_type >= NV_50) { 1261 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1261 struct nouveau_bar *bar = nouveau_bar(drm->device); 1262 struct nouveau_bar *bar = nvkm_bar(&drm->device);
1262 1263
1263 ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, 1264 ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
1264 &node->bar_vma); 1265 &node->bar_vma);
@@ -1278,7 +1279,7 @@ static void
1278nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1279nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1279{ 1280{
1280 struct nouveau_drm *drm = nouveau_bdev(bdev); 1281 struct nouveau_drm *drm = nouveau_bdev(bdev);
1281 struct nouveau_bar *bar = nouveau_bar(drm->device); 1282 struct nouveau_bar *bar = nvkm_bar(&drm->device);
1282 struct nouveau_mem *node = mem->mm_node; 1283 struct nouveau_mem *node = mem->mm_node;
1283 1284
1284 if (!node->bar_vma.node) 1285 if (!node->bar_vma.node)
@@ -1292,15 +1293,15 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1292{ 1293{
1293 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1294 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1294 struct nouveau_bo *nvbo = nouveau_bo(bo); 1295 struct nouveau_bo *nvbo = nouveau_bo(bo);
1295 struct nouveau_device *device = nv_device(drm->device); 1296 struct nvif_device *device = &drm->device;
1296 u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT; 1297 u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
1297 int ret; 1298 int ret;
1298 1299
1299 /* as long as the bo isn't in vram, and isn't tiled, we've got 1300 /* as long as the bo isn't in vram, and isn't tiled, we've got
1300 * nothing to do here. 1301 * nothing to do here.
1301 */ 1302 */
1302 if (bo->mem.mem_type != TTM_PL_VRAM) { 1303 if (bo->mem.mem_type != TTM_PL_VRAM) {
1303 if (nv_device(drm->device)->card_type < NV_50 || 1304 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1304 !nouveau_bo_tile_layout(nvbo)) 1305 !nouveau_bo_tile_layout(nvbo))
1305 return 0; 1306 return 0;
1306 1307
@@ -1315,7 +1316,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1315 } 1316 }
1316 1317
1317 /* make sure bo is in mappable vram */ 1318 /* make sure bo is in mappable vram */
1318 if (nv_device(drm->device)->card_type >= NV_50 || 1319 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1319 bo->mem.start + bo->mem.num_pages < mappable) 1320 bo->mem.start + bo->mem.num_pages < mappable)
1320 return 0; 1321 return 0;
1321 1322
@@ -1333,6 +1334,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1333 struct nouveau_drm *drm; 1334 struct nouveau_drm *drm;
1334 struct nouveau_device *device; 1335 struct nouveau_device *device;
1335 struct drm_device *dev; 1336 struct drm_device *dev;
1337 struct device *pdev;
1336 unsigned i; 1338 unsigned i;
1337 int r; 1339 int r;
1338 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1340 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1349,8 +1351,9 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1349 } 1351 }
1350 1352
1351 drm = nouveau_bdev(ttm->bdev); 1353 drm = nouveau_bdev(ttm->bdev);
1352 device = nv_device(drm->device); 1354 device = nvkm_device(&drm->device);
1353 dev = drm->dev; 1355 dev = drm->dev;
1356 pdev = nv_device_base(device);
1354 1357
1355#if __OS_HAS_AGP 1358#if __OS_HAS_AGP
1356 if (drm->agp.stat == ENABLED) { 1359 if (drm->agp.stat == ENABLED) {
@@ -1370,17 +1373,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1370 } 1373 }
1371 1374
1372 for (i = 0; i < ttm->num_pages; i++) { 1375 for (i = 0; i < ttm->num_pages; i++) {
1373 ttm_dma->dma_address[i] = nv_device_map_page(device, 1376 dma_addr_t addr;
1374 ttm->pages[i]); 1377
1375 if (!ttm_dma->dma_address[i]) { 1378 addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
1379 DMA_BIDIRECTIONAL);
1380
1381 if (dma_mapping_error(pdev, addr)) {
1376 while (--i) { 1382 while (--i) {
1377 nv_device_unmap_page(device, 1383 dma_unmap_page(pdev, ttm_dma->dma_address[i],
1378 ttm_dma->dma_address[i]); 1384 PAGE_SIZE, DMA_BIDIRECTIONAL);
1379 ttm_dma->dma_address[i] = 0; 1385 ttm_dma->dma_address[i] = 0;
1380 } 1386 }
1381 ttm_pool_unpopulate(ttm); 1387 ttm_pool_unpopulate(ttm);
1382 return -EFAULT; 1388 return -EFAULT;
1383 } 1389 }
1390
1391 ttm_dma->dma_address[i] = addr;
1384 } 1392 }
1385 return 0; 1393 return 0;
1386} 1394}
@@ -1392,6 +1400,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1392 struct nouveau_drm *drm; 1400 struct nouveau_drm *drm;
1393 struct nouveau_device *device; 1401 struct nouveau_device *device;
1394 struct drm_device *dev; 1402 struct drm_device *dev;
1403 struct device *pdev;
1395 unsigned i; 1404 unsigned i;
1396 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1405 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1397 1406
@@ -1399,8 +1408,9 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1399 return; 1408 return;
1400 1409
1401 drm = nouveau_bdev(ttm->bdev); 1410 drm = nouveau_bdev(ttm->bdev);
1402 device = nv_device(drm->device); 1411 device = nvkm_device(&drm->device);
1403 dev = drm->dev; 1412 dev = drm->dev;
1413 pdev = nv_device_base(device);
1404 1414
1405#if __OS_HAS_AGP 1415#if __OS_HAS_AGP
1406 if (drm->agp.stat == ENABLED) { 1416 if (drm->agp.stat == ENABLED) {
@@ -1418,7 +1428,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1418 1428
1419 for (i = 0; i < ttm->num_pages; i++) { 1429 for (i = 0; i < ttm->num_pages; i++) {
1420 if (ttm_dma->dma_address[i]) { 1430 if (ttm_dma->dma_address[i]) {
1421 nv_device_unmap_page(device, ttm_dma->dma_address[i]); 1431 dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
1432 DMA_BIDIRECTIONAL);
1422 } 1433 }
1423 } 1434 }
1424 1435
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index ccb6b452d6d0..99cd9e4a2aa6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -22,16 +22,11 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/object.h> 25#include <nvif/os.h>
26#include <core/client.h> 26#include <nvif/class.h>
27#include <core/device.h>
28#include <core/class.h>
29
30#include <subdev/fb.h>
31#include <subdev/vm.h>
32#include <subdev/instmem.h>
33 27
34#include <engine/software.h> 28/*XXX*/
29#include <core/client.h>
35 30
36#include "nouveau_drm.h" 31#include "nouveau_drm.h"
37#include "nouveau_dma.h" 32#include "nouveau_dma.h"
@@ -47,7 +42,7 @@ module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
47int 42int
48nouveau_channel_idle(struct nouveau_channel *chan) 43nouveau_channel_idle(struct nouveau_channel *chan)
49{ 44{
50 struct nouveau_cli *cli = chan->cli; 45 struct nouveau_cli *cli = (void *)nvif_client(chan->object);
51 struct nouveau_fence *fence = NULL; 46 struct nouveau_fence *fence = NULL;
52 int ret; 47 int ret;
53 48
@@ -58,8 +53,8 @@ nouveau_channel_idle(struct nouveau_channel *chan)
58 } 53 }
59 54
60 if (ret) 55 if (ret)
61 NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n", 56 NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
62 chan->handle, cli->base.name); 57 chan->object->handle, nvkm_client(&cli->base)->name);
63 return ret; 58 return ret;
64} 59}
65 60
@@ -68,36 +63,34 @@ nouveau_channel_del(struct nouveau_channel **pchan)
68{ 63{
69 struct nouveau_channel *chan = *pchan; 64 struct nouveau_channel *chan = *pchan;
70 if (chan) { 65 if (chan) {
71 struct nouveau_object *client = nv_object(chan->cli);
72 if (chan->fence) { 66 if (chan->fence) {
73 nouveau_channel_idle(chan); 67 nouveau_channel_idle(chan);
74 nouveau_fence(chan->drm)->context_del(chan); 68 nouveau_fence(chan->drm)->context_del(chan);
75 } 69 }
76 nouveau_object_del(client, NVDRM_DEVICE, chan->handle); 70 nvif_object_fini(&chan->nvsw);
77 nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle); 71 nvif_object_fini(&chan->gart);
72 nvif_object_fini(&chan->vram);
73 nvif_object_ref(NULL, &chan->object);
74 nvif_object_fini(&chan->push.ctxdma);
78 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); 75 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
79 nouveau_bo_unmap(chan->push.buffer); 76 nouveau_bo_unmap(chan->push.buffer);
80 if (chan->push.buffer && chan->push.buffer->pin_refcnt) 77 if (chan->push.buffer && chan->push.buffer->pin_refcnt)
81 nouveau_bo_unpin(chan->push.buffer); 78 nouveau_bo_unpin(chan->push.buffer);
82 nouveau_bo_ref(NULL, &chan->push.buffer); 79 nouveau_bo_ref(NULL, &chan->push.buffer);
80 nvif_device_ref(NULL, &chan->device);
83 kfree(chan); 81 kfree(chan);
84 } 82 }
85 *pchan = NULL; 83 *pchan = NULL;
86} 84}
87 85
88static int 86static int
89nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli, 87nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
90 u32 parent, u32 handle, u32 size, 88 u32 handle, u32 size, struct nouveau_channel **pchan)
91 struct nouveau_channel **pchan)
92{ 89{
93 struct nouveau_device *device = nv_device(drm->device); 90 struct nouveau_cli *cli = (void *)nvif_client(&device->base);
94 struct nouveau_instmem *imem = nouveau_instmem(device); 91 struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
95 struct nouveau_vmmgr *vmm = nouveau_vmmgr(device); 92 struct nv_dma_v0 args = {};
96 struct nouveau_fb *pfb = nouveau_fb(device);
97 struct nouveau_client *client = &cli->base;
98 struct nv_dma_class args = {};
99 struct nouveau_channel *chan; 93 struct nouveau_channel *chan;
100 struct nouveau_object *push;
101 u32 target; 94 u32 target;
102 int ret; 95 int ret;
103 96
@@ -105,9 +98,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
105 if (!chan) 98 if (!chan)
106 return -ENOMEM; 99 return -ENOMEM;
107 100
108 chan->cli = cli; 101 nvif_device_ref(device, &chan->device);
109 chan->drm = drm; 102 chan->drm = drm;
110 chan->handle = handle;
111 103
112 /* allocate memory for dma push buffer */ 104 /* allocate memory for dma push buffer */
113 target = TTM_PL_FLAG_TT; 105 target = TTM_PL_FLAG_TT;
@@ -132,51 +124,54 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
132 * we be able to call out to other (indirect) push buffers 124 * we be able to call out to other (indirect) push buffers
133 */ 125 */
134 chan->push.vma.offset = chan->push.buffer->bo.offset; 126 chan->push.vma.offset = chan->push.buffer->bo.offset;
135 chan->push.handle = NVDRM_PUSH | (handle & 0xffff);
136 127
137 if (device->card_type >= NV_50) { 128 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
138 ret = nouveau_bo_vma_add(chan->push.buffer, client->vm, 129 ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
139 &chan->push.vma); 130 &chan->push.vma);
140 if (ret) { 131 if (ret) {
141 nouveau_channel_del(pchan); 132 nouveau_channel_del(pchan);
142 return ret; 133 return ret;
143 } 134 }
144 135
145 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; 136 args.target = NV_DMA_V0_TARGET_VM;
137 args.access = NV_DMA_V0_ACCESS_VM;
146 args.start = 0; 138 args.start = 0;
147 args.limit = client->vm->vmm->limit - 1; 139 args.limit = cli->vm->vmm->limit - 1;
148 } else 140 } else
149 if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) { 141 if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
150 u64 limit = pfb->ram->size - imem->reserved - 1; 142 if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
151 if (device->card_type == NV_04) {
152 /* nv04 vram pushbuf hack, retarget to its location in 143 /* nv04 vram pushbuf hack, retarget to its location in
153 * the framebuffer bar rather than direct vram access.. 144 * the framebuffer bar rather than direct vram access..
154 * nfi why this exists, it came from the -nv ddx. 145 * nfi why this exists, it came from the -nv ddx.
155 */ 146 */
156 args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR; 147 args.target = NV_DMA_V0_TARGET_PCI;
157 args.start = nv_device_resource_start(device, 1); 148 args.access = NV_DMA_V0_ACCESS_RDWR;
158 args.limit = args.start + limit; 149 args.start = nv_device_resource_start(nvkm_device(device), 1);
150 args.limit = args.start + device->info.ram_user - 1;
159 } else { 151 } else {
160 args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR; 152 args.target = NV_DMA_V0_TARGET_VRAM;
153 args.access = NV_DMA_V0_ACCESS_RDWR;
161 args.start = 0; 154 args.start = 0;
162 args.limit = limit; 155 args.limit = device->info.ram_user - 1;
163 } 156 }
164 } else { 157 } else {
165 if (chan->drm->agp.stat == ENABLED) { 158 if (chan->drm->agp.stat == ENABLED) {
166 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; 159 args.target = NV_DMA_V0_TARGET_AGP;
160 args.access = NV_DMA_V0_ACCESS_RDWR;
167 args.start = chan->drm->agp.base; 161 args.start = chan->drm->agp.base;
168 args.limit = chan->drm->agp.base + 162 args.limit = chan->drm->agp.base +
169 chan->drm->agp.size - 1; 163 chan->drm->agp.size - 1;
170 } else { 164 } else {
171 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; 165 args.target = NV_DMA_V0_TARGET_VM;
166 args.access = NV_DMA_V0_ACCESS_RDWR;
172 args.start = 0; 167 args.start = 0;
173 args.limit = vmm->limit - 1; 168 args.limit = vmm->limit - 1;
174 } 169 }
175 } 170 }
176 171
177 ret = nouveau_object_new(nv_object(chan->cli), parent, 172 ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
178 chan->push.handle, 0x0002, 173 (handle & 0xffff), NV_DMA_FROM_MEMORY,
179 &args, sizeof(args), &push); 174 &args, sizeof(args), &chan->push.ctxdma);
180 if (ret) { 175 if (ret) {
181 nouveau_channel_del(pchan); 176 nouveau_channel_del(pchan);
182 return ret; 177 return ret;
@@ -186,38 +181,56 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
186} 181}
187 182
188static int 183static int
189nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli, 184nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
190 u32 parent, u32 handle, u32 engine, 185 u32 handle, u32 engine, struct nouveau_channel **pchan)
191 struct nouveau_channel **pchan)
192{ 186{
193 static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS, 187 static const u16 oclasses[] = { KEPLER_CHANNEL_GPFIFO_A,
194 NVC0_CHANNEL_IND_CLASS, 188 FERMI_CHANNEL_GPFIFO,
195 NV84_CHANNEL_IND_CLASS, 189 G82_CHANNEL_GPFIFO,
196 NV50_CHANNEL_IND_CLASS, 190 NV50_CHANNEL_GPFIFO,
197 0 }; 191 0 };
198 const u16 *oclass = oclasses; 192 const u16 *oclass = oclasses;
199 struct nve0_channel_ind_class args; 193 union {
194 struct nv50_channel_gpfifo_v0 nv50;
195 struct kepler_channel_gpfifo_a_v0 kepler;
196 } args, *retn;
200 struct nouveau_channel *chan; 197 struct nouveau_channel *chan;
198 u32 size;
201 int ret; 199 int ret;
202 200
203 /* allocate dma push buffer */ 201 /* allocate dma push buffer */
204 ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan); 202 ret = nouveau_channel_prep(drm, device, handle, 0x12000, &chan);
205 *pchan = chan; 203 *pchan = chan;
206 if (ret) 204 if (ret)
207 return ret; 205 return ret;
208 206
209 /* create channel object */ 207 /* create channel object */
210 args.pushbuf = chan->push.handle;
211 args.ioffset = 0x10000 + chan->push.vma.offset;
212 args.ilength = 0x02000;
213 args.engine = engine;
214
215 do { 208 do {
216 ret = nouveau_object_new(nv_object(cli), parent, handle, 209 if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
217 *oclass++, &args, sizeof(args), 210 args.kepler.version = 0;
218 &chan->object); 211 args.kepler.engine = engine;
219 if (ret == 0) 212 args.kepler.pushbuf = chan->push.ctxdma.handle;
213 args.kepler.ilength = 0x02000;
214 args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
215 size = sizeof(args.kepler);
216 } else {
217 args.nv50.version = 0;
218 args.nv50.pushbuf = chan->push.ctxdma.handle;
219 args.nv50.ilength = 0x02000;
220 args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
221 size = sizeof(args.nv50);
222 }
223
224 ret = nvif_object_new(nvif_object(device), handle, *oclass++,
225 &args, size, &chan->object);
226 if (ret == 0) {
227 retn = chan->object->data;
228 if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A)
229 chan->chid = retn->kepler.chid;
230 else
231 chan->chid = retn->nv50.chid;
220 return ret; 232 return ret;
233 }
221 } while (*oclass); 234 } while (*oclass);
222 235
223 nouveau_channel_del(pchan); 236 nouveau_channel_del(pchan);
@@ -225,35 +238,38 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
225} 238}
226 239
227static int 240static int
228nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli, 241nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
229 u32 parent, u32 handle, struct nouveau_channel **pchan) 242 u32 handle, struct nouveau_channel **pchan)
230{ 243{
231 static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS, 244 static const u16 oclasses[] = { NV40_CHANNEL_DMA,
232 NV17_CHANNEL_DMA_CLASS, 245 NV17_CHANNEL_DMA,
233 NV10_CHANNEL_DMA_CLASS, 246 NV10_CHANNEL_DMA,
234 NV03_CHANNEL_DMA_CLASS, 247 NV03_CHANNEL_DMA,
235 0 }; 248 0 };
236 const u16 *oclass = oclasses; 249 const u16 *oclass = oclasses;
237 struct nv03_channel_dma_class args; 250 struct nv03_channel_dma_v0 args, *retn;
238 struct nouveau_channel *chan; 251 struct nouveau_channel *chan;
239 int ret; 252 int ret;
240 253
241 /* allocate dma push buffer */ 254 /* allocate dma push buffer */
242 ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan); 255 ret = nouveau_channel_prep(drm, device, handle, 0x10000, &chan);
243 *pchan = chan; 256 *pchan = chan;
244 if (ret) 257 if (ret)
245 return ret; 258 return ret;
246 259
247 /* create channel object */ 260 /* create channel object */
248 args.pushbuf = chan->push.handle; 261 args.version = 0;
262 args.pushbuf = chan->push.ctxdma.handle;
249 args.offset = chan->push.vma.offset; 263 args.offset = chan->push.vma.offset;
250 264
251 do { 265 do {
252 ret = nouveau_object_new(nv_object(cli), parent, handle, 266 ret = nvif_object_new(nvif_object(device), handle, *oclass++,
253 *oclass++, &args, sizeof(args), 267 &args, sizeof(args), &chan->object);
254 &chan->object); 268 if (ret == 0) {
255 if (ret == 0) 269 retn = chan->object->data;
270 chan->chid = retn->chid;
256 return ret; 271 return ret;
272 }
257 } while (ret && *oclass); 273 } while (ret && *oclass);
258 274
259 nouveau_channel_del(pchan); 275 nouveau_channel_del(pchan);
@@ -263,60 +279,63 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
263static int 279static int
264nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) 280nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
265{ 281{
266 struct nouveau_client *client = nv_client(chan->cli); 282 struct nvif_device *device = chan->device;
267 struct nouveau_device *device = nv_device(chan->drm->device); 283 struct nouveau_cli *cli = (void *)nvif_client(&device->base);
268 struct nouveau_instmem *imem = nouveau_instmem(device); 284 struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
269 struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
270 struct nouveau_fb *pfb = nouveau_fb(device);
271 struct nouveau_software_chan *swch; 285 struct nouveau_software_chan *swch;
272 struct nouveau_object *object; 286 struct nv_dma_v0 args = {};
273 struct nv_dma_class args = {};
274 int ret, i; 287 int ret, i;
275 288
289 nvif_object_map(chan->object);
290
276 /* allocate dma objects to cover all allowed vram, and gart */ 291 /* allocate dma objects to cover all allowed vram, and gart */
277 if (device->card_type < NV_C0) { 292 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
278 if (device->card_type >= NV_50) { 293 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
279 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; 294 args.target = NV_DMA_V0_TARGET_VM;
295 args.access = NV_DMA_V0_ACCESS_VM;
280 args.start = 0; 296 args.start = 0;
281 args.limit = client->vm->vmm->limit - 1; 297 args.limit = cli->vm->vmm->limit - 1;
282 } else { 298 } else {
283 args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR; 299 args.target = NV_DMA_V0_TARGET_VRAM;
300 args.access = NV_DMA_V0_ACCESS_RDWR;
284 args.start = 0; 301 args.start = 0;
285 args.limit = pfb->ram->size - imem->reserved - 1; 302 args.limit = device->info.ram_user - 1;
286 } 303 }
287 304
288 ret = nouveau_object_new(nv_object(client), chan->handle, vram, 305 ret = nvif_object_init(chan->object, NULL, vram,
289 0x003d, &args, sizeof(args), &object); 306 NV_DMA_IN_MEMORY, &args,
307 sizeof(args), &chan->vram);
290 if (ret) 308 if (ret)
291 return ret; 309 return ret;
292 310
293 if (device->card_type >= NV_50) { 311 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
294 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; 312 args.target = NV_DMA_V0_TARGET_VM;
313 args.access = NV_DMA_V0_ACCESS_VM;
295 args.start = 0; 314 args.start = 0;
296 args.limit = client->vm->vmm->limit - 1; 315 args.limit = cli->vm->vmm->limit - 1;
297 } else 316 } else
298 if (chan->drm->agp.stat == ENABLED) { 317 if (chan->drm->agp.stat == ENABLED) {
299 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; 318 args.target = NV_DMA_V0_TARGET_AGP;
319 args.access = NV_DMA_V0_ACCESS_RDWR;
300 args.start = chan->drm->agp.base; 320 args.start = chan->drm->agp.base;
301 args.limit = chan->drm->agp.base + 321 args.limit = chan->drm->agp.base +
302 chan->drm->agp.size - 1; 322 chan->drm->agp.size - 1;
303 } else { 323 } else {
304 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; 324 args.target = NV_DMA_V0_TARGET_VM;
325 args.access = NV_DMA_V0_ACCESS_RDWR;
305 args.start = 0; 326 args.start = 0;
306 args.limit = vmm->limit - 1; 327 args.limit = vmm->limit - 1;
307 } 328 }
308 329
309 ret = nouveau_object_new(nv_object(client), chan->handle, gart, 330 ret = nvif_object_init(chan->object, NULL, gart,
310 0x003d, &args, sizeof(args), &object); 331 NV_DMA_IN_MEMORY, &args,
332 sizeof(args), &chan->gart);
311 if (ret) 333 if (ret)
312 return ret; 334 return ret;
313
314 chan->vram = vram;
315 chan->gart = gart;
316 } 335 }
317 336
318 /* initialise dma tracking parameters */ 337 /* initialise dma tracking parameters */
319 switch (nv_hclass(chan->object) & 0x00ff) { 338 switch (chan->object->oclass & 0x00ff) {
320 case 0x006b: 339 case 0x006b:
321 case 0x006e: 340 case 0x006e:
322 chan->user_put = 0x40; 341 chan->user_put = 0x40;
@@ -347,13 +366,13 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
347 OUT_RING(chan, 0x00000000); 366 OUT_RING(chan, 0x00000000);
348 367
349 /* allocate software object class (used for fences on <= nv05) */ 368 /* allocate software object class (used for fences on <= nv05) */
350 if (device->card_type < NV_10) { 369 if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
351 ret = nouveau_object_new(nv_object(client), chan->handle, 370 ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
352 NvSw, 0x006e, NULL, 0, &object); 371 NULL, 0, &chan->nvsw);
353 if (ret) 372 if (ret)
354 return ret; 373 return ret;
355 374
356 swch = (void *)object->parent; 375 swch = (void *)nvkm_object(&chan->nvsw)->parent;
357 swch->flip = nouveau_flip_complete; 376 swch->flip = nouveau_flip_complete;
358 swch->flip_data = chan; 377 swch->flip_data = chan;
359 378
@@ -362,7 +381,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
362 return ret; 381 return ret;
363 382
364 BEGIN_NV04(chan, NvSubSw, 0x0000, 1); 383 BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
365 OUT_RING (chan, NvSw); 384 OUT_RING (chan, chan->nvsw.handle);
366 FIRE_RING (chan); 385 FIRE_RING (chan);
367 } 386 }
368 387
@@ -371,25 +390,26 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
371} 390}
372 391
373int 392int
374nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli, 393nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
375 u32 parent, u32 handle, u32 arg0, u32 arg1, 394 u32 handle, u32 arg0, u32 arg1,
376 struct nouveau_channel **pchan) 395 struct nouveau_channel **pchan)
377{ 396{
397 struct nouveau_cli *cli = (void *)nvif_client(&device->base);
378 int ret; 398 int ret;
379 399
380 ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan); 400 ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
381 if (ret) { 401 if (ret) {
382 NV_DEBUG(cli, "ib channel create, %d\n", ret); 402 NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
383 ret = nouveau_channel_dma(drm, cli, parent, handle, pchan); 403 ret = nouveau_channel_dma(drm, device, handle, pchan);
384 if (ret) { 404 if (ret) {
385 NV_DEBUG(cli, "dma channel create, %d\n", ret); 405 NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
386 return ret; 406 return ret;
387 } 407 }
388 } 408 }
389 409
390 ret = nouveau_channel_init(*pchan, arg0, arg1); 410 ret = nouveau_channel_init(*pchan, arg0, arg1);
391 if (ret) { 411 if (ret) {
392 NV_ERROR(cli, "channel failed to initialise, %d\n", ret); 412 NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
393 nouveau_channel_del(pchan); 413 nouveau_channel_del(pchan);
394 return ret; 414 return ret;
395 } 415 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 40f97e2c47b6..20163709d608 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -1,20 +1,23 @@
1#ifndef __NOUVEAU_CHAN_H__ 1#ifndef __NOUVEAU_CHAN_H__
2#define __NOUVEAU_CHAN_H__ 2#define __NOUVEAU_CHAN_H__
3 3
4struct nouveau_cli; 4#include <nvif/object.h>
5struct nvif_device;
5 6
6struct nouveau_channel { 7struct nouveau_channel {
7 struct nouveau_cli *cli; 8 struct nvif_device *device;
8 struct nouveau_drm *drm; 9 struct nouveau_drm *drm;
9 10
10 u32 handle; 11 int chid;
11 u32 vram; 12
12 u32 gart; 13 struct nvif_object vram;
14 struct nvif_object gart;
15 struct nvif_object nvsw;
13 16
14 struct { 17 struct {
15 struct nouveau_bo *buffer; 18 struct nouveau_bo *buffer;
16 struct nouveau_vma vma; 19 struct nouveau_vma vma;
17 u32 handle; 20 struct nvif_object ctxdma;
18 } push; 21 } push;
19 22
20 /* TODO: this will be reworked in the near future */ 23 /* TODO: this will be reworked in the near future */
@@ -34,12 +37,12 @@ struct nouveau_channel {
34 u32 user_get; 37 u32 user_get;
35 u32 user_put; 38 u32 user_put;
36 39
37 struct nouveau_object *object; 40 struct nvif_object *object;
38}; 41};
39 42
40 43
41int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *, 44int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
42 u32 parent, u32 handle, u32 arg0, u32 arg1, 45 u32 handle, u32 arg0, u32 arg1,
43 struct nouveau_channel **); 46 struct nouveau_channel **);
44void nouveau_channel_del(struct nouveau_channel **); 47void nouveau_channel_del(struct nouveau_channel **);
45int nouveau_channel_idle(struct nouveau_channel *); 48int nouveau_channel_idle(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index dbdc9ad59546..1ec44c83e919 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -42,9 +42,7 @@
42#include "nouveau_encoder.h" 42#include "nouveau_encoder.h"
43#include "nouveau_crtc.h" 43#include "nouveau_crtc.h"
44 44
45#include <subdev/i2c.h> 45#include <nvif/event.h>
46#include <subdev/gpio.h>
47#include <engine/disp.h>
48 46
49MODULE_PARM_DESC(tv_disable, "Disable TV-out detection"); 47MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
50static int nouveau_tv_disable = 0; 48static int nouveau_tv_disable = 0;
@@ -102,7 +100,7 @@ static void
102nouveau_connector_destroy(struct drm_connector *connector) 100nouveau_connector_destroy(struct drm_connector *connector)
103{ 101{
104 struct nouveau_connector *nv_connector = nouveau_connector(connector); 102 struct nouveau_connector *nv_connector = nouveau_connector(connector);
105 nouveau_event_ref(NULL, &nv_connector->hpd); 103 nvif_notify_fini(&nv_connector->hpd);
106 kfree(nv_connector->edid); 104 kfree(nv_connector->edid);
107 drm_connector_unregister(connector); 105 drm_connector_unregister(connector);
108 drm_connector_cleanup(connector); 106 drm_connector_cleanup(connector);
@@ -117,7 +115,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
117 struct drm_device *dev = connector->dev; 115 struct drm_device *dev = connector->dev;
118 struct nouveau_connector *nv_connector = nouveau_connector(connector); 116 struct nouveau_connector *nv_connector = nouveau_connector(connector);
119 struct nouveau_drm *drm = nouveau_drm(dev); 117 struct nouveau_drm *drm = nouveau_drm(dev);
120 struct nouveau_gpio *gpio = nouveau_gpio(drm->device); 118 struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
121 struct nouveau_encoder *nv_encoder; 119 struct nouveau_encoder *nv_encoder;
122 struct drm_encoder *encoder; 120 struct drm_encoder *encoder;
123 int i, panel = -ENODEV; 121 int i, panel = -ENODEV;
@@ -206,7 +204,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
206 return; 204 return;
207 nv_connector->detected_encoder = nv_encoder; 205 nv_connector->detected_encoder = nv_encoder;
208 206
209 if (nv_device(drm->device)->card_type >= NV_50) { 207 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
210 connector->interlace_allowed = true; 208 connector->interlace_allowed = true;
211 connector->doublescan_allowed = true; 209 connector->doublescan_allowed = true;
212 } else 210 } else
@@ -216,9 +214,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
216 connector->interlace_allowed = false; 214 connector->interlace_allowed = false;
217 } else { 215 } else {
218 connector->doublescan_allowed = true; 216 connector->doublescan_allowed = true;
219 if (nv_device(drm->device)->card_type == NV_20 || 217 if (drm->device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
220 ((nv_device(drm->device)->card_type == NV_10 || 218 (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
221 nv_device(drm->device)->card_type == NV_11) &&
222 (dev->pdev->device & 0x0ff0) != 0x0100 && 219 (dev->pdev->device & 0x0ff0) != 0x0100 &&
223 (dev->pdev->device & 0x0ff0) != 0x0150)) 220 (dev->pdev->device & 0x0ff0) != 0x0150))
224 /* HW is broken */ 221 /* HW is broken */
@@ -802,11 +799,11 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
802 struct dcb_output *dcb = nv_connector->detected_encoder->dcb; 799 struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
803 800
804 if (dcb->location != DCB_LOC_ON_CHIP || 801 if (dcb->location != DCB_LOC_ON_CHIP ||
805 nv_device(drm->device)->chipset >= 0x46) 802 drm->device.info.chipset >= 0x46)
806 return 165000; 803 return 165000;
807 else if (nv_device(drm->device)->chipset >= 0x40) 804 else if (drm->device.info.chipset >= 0x40)
808 return 155000; 805 return 155000;
809 else if (nv_device(drm->device)->chipset >= 0x18) 806 else if (drm->device.info.chipset >= 0x18)
810 return 135000; 807 return 135000;
811 else 808 else
812 return 112000; 809 return 112000;
@@ -939,18 +936,19 @@ nouveau_connector_funcs_dp = {
939 .force = nouveau_connector_force 936 .force = nouveau_connector_force
940}; 937};
941 938
942static void 939static int
943nouveau_connector_hotplug_work(struct work_struct *work) 940nouveau_connector_hotplug(struct nvif_notify *notify)
944{ 941{
945 struct nouveau_connector *nv_connector = 942 struct nouveau_connector *nv_connector =
946 container_of(work, typeof(*nv_connector), work); 943 container_of(notify, typeof(*nv_connector), hpd);
947 struct drm_connector *connector = &nv_connector->base; 944 struct drm_connector *connector = &nv_connector->base;
948 struct nouveau_drm *drm = nouveau_drm(connector->dev); 945 struct nouveau_drm *drm = nouveau_drm(connector->dev);
946 const struct nvif_notify_conn_rep_v0 *rep = notify->data;
949 const char *name = connector->name; 947 const char *name = connector->name;
950 948
951 if (nv_connector->status & NVKM_HPD_IRQ) { 949 if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
952 } else { 950 } else {
953 bool plugged = (nv_connector->status != NVKM_HPD_UNPLUG); 951 bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
954 952
955 NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); 953 NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
956 954
@@ -961,16 +959,7 @@ nouveau_connector_hotplug_work(struct work_struct *work)
961 drm_helper_hpd_irq_event(connector->dev); 959 drm_helper_hpd_irq_event(connector->dev);
962 } 960 }
963 961
964 nouveau_event_get(nv_connector->hpd); 962 return NVIF_NOTIFY_KEEP;
965}
966
967static int
968nouveau_connector_hotplug(void *data, u32 type, int index)
969{
970 struct nouveau_connector *nv_connector = data;
971 nv_connector->status = type;
972 schedule_work(&nv_connector->work);
973 return NVKM_EVENT_DROP;
974} 963}
975 964
976static ssize_t 965static ssize_t
@@ -1040,7 +1029,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
1040 struct nouveau_drm *drm = nouveau_drm(dev); 1029 struct nouveau_drm *drm = nouveau_drm(dev);
1041 struct nouveau_display *disp = nouveau_display(dev); 1030 struct nouveau_display *disp = nouveau_display(dev);
1042 struct nouveau_connector *nv_connector = NULL; 1031 struct nouveau_connector *nv_connector = NULL;
1043 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
1044 struct drm_connector *connector; 1032 struct drm_connector *connector;
1045 int type, ret = 0; 1033 int type, ret = 0;
1046 bool dummy; 1034 bool dummy;
@@ -1194,7 +1182,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
1194 1182
1195 switch (nv_connector->type) { 1183 switch (nv_connector->type) {
1196 case DCB_CONNECTOR_VGA: 1184 case DCB_CONNECTOR_VGA:
1197 if (nv_device(drm->device)->card_type >= NV_50) { 1185 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1198 drm_object_attach_property(&connector->base, 1186 drm_object_attach_property(&connector->base,
1199 dev->mode_config.scaling_mode_property, 1187 dev->mode_config.scaling_mode_property,
1200 nv_connector->scaling_mode); 1188 nv_connector->scaling_mode);
@@ -1226,16 +1214,20 @@ nouveau_connector_create(struct drm_device *dev, int index)
1226 break; 1214 break;
1227 } 1215 }
1228 1216
1229 ret = nouveau_event_new(pdisp->hpd, NVKM_HPD, index, 1217 ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug,
1230 nouveau_connector_hotplug, 1218 true, NV04_DISP_NTFY_CONN,
1231 nv_connector, &nv_connector->hpd); 1219 &(struct nvif_notify_conn_req_v0) {
1220 .mask = NVIF_NOTIFY_CONN_V0_ANY,
1221 .conn = index,
1222 },
1223 sizeof(struct nvif_notify_conn_req_v0),
1224 sizeof(struct nvif_notify_conn_rep_v0),
1225 &nv_connector->hpd);
1232 if (ret) 1226 if (ret)
1233 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1227 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1234 else 1228 else
1235 connector->polled = DRM_CONNECTOR_POLL_HPD; 1229 connector->polled = DRM_CONNECTOR_POLL_HPD;
1236 1230
1237 INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work);
1238
1239 drm_connector_register(connector); 1231 drm_connector_register(connector);
1240 return connector; 1232 return connector;
1241} 1233}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 8861b6c579ad..68029d041dd2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -27,14 +27,12 @@
27#ifndef __NOUVEAU_CONNECTOR_H__ 27#ifndef __NOUVEAU_CONNECTOR_H__
28#define __NOUVEAU_CONNECTOR_H__ 28#define __NOUVEAU_CONNECTOR_H__
29 29
30#include <nvif/notify.h>
31
30#include <drm/drm_edid.h> 32#include <drm/drm_edid.h>
31#include <drm/drm_dp_helper.h> 33#include <drm/drm_dp_helper.h>
32#include "nouveau_crtc.h" 34#include "nouveau_crtc.h"
33 35
34#include <core/event.h>
35
36#include <subdev/bios.h>
37
38struct nouveau_i2c_port; 36struct nouveau_i2c_port;
39 37
40enum nouveau_underscan_type { 38enum nouveau_underscan_type {
@@ -67,9 +65,7 @@ struct nouveau_connector {
67 u8 index; 65 u8 index;
68 u8 *dcb; 66 u8 *dcb;
69 67
70 struct nouveau_eventh *hpd; 68 struct nvif_notify hpd;
71 u32 status;
72 struct work_struct work;
73 69
74 struct drm_dp_aux aux; 70 struct drm_dp_aux aux;
75 71
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index a0534489d23f..f19cb1c5fc5a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -27,10 +27,13 @@
27#ifndef __NOUVEAU_CRTC_H__ 27#ifndef __NOUVEAU_CRTC_H__
28#define __NOUVEAU_CRTC_H__ 28#define __NOUVEAU_CRTC_H__
29 29
30#include <nvif/notify.h>
31
30struct nouveau_crtc { 32struct nouveau_crtc {
31 struct drm_crtc base; 33 struct drm_crtc base;
32 34
33 int index; 35 int index;
36 struct nvif_notify vblank;
34 37
35 uint32_t dpms_saved_fp_control; 38 uint32_t dpms_saved_fp_control;
36 uint32_t fp_users; 39 uint32_t fp_users;
@@ -46,7 +49,7 @@ struct nouveau_crtc {
46 int cpp; 49 int cpp;
47 bool blanked; 50 bool blanked;
48 uint32_t offset; 51 uint32_t offset;
49 uint32_t tile_flags; 52 uint32_t handle;
50 } fb; 53 } fb;
51 54
52 struct { 55 struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 47ad74255bf1..1cc7b603c753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -27,6 +27,8 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
29 29
30#include <nvif/class.h>
31
30#include "nouveau_fbcon.h" 32#include "nouveau_fbcon.h"
31#include "dispnv04/hw.h" 33#include "dispnv04/hw.h"
32#include "nouveau_crtc.h" 34#include "nouveau_crtc.h"
@@ -37,35 +39,42 @@
37 39
38#include "nouveau_fence.h" 40#include "nouveau_fence.h"
39 41
40#include <engine/disp.h> 42#include <nvif/event.h>
41
42#include <core/class.h>
43 43
44static int 44static int
45nouveau_display_vblank_handler(void *data, u32 type, int head) 45nouveau_display_vblank_handler(struct nvif_notify *notify)
46{ 46{
47 struct nouveau_drm *drm = data; 47 struct nouveau_crtc *nv_crtc =
48 drm_handle_vblank(drm->dev, head); 48 container_of(notify, typeof(*nv_crtc), vblank);
49 return NVKM_EVENT_KEEP; 49 drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index);
50 return NVIF_NOTIFY_KEEP;
50} 51}
51 52
52int 53int
53nouveau_display_vblank_enable(struct drm_device *dev, int head) 54nouveau_display_vblank_enable(struct drm_device *dev, int head)
54{ 55{
55 struct nouveau_display *disp = nouveau_display(dev); 56 struct drm_crtc *crtc;
56 if (disp) { 57 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
57 nouveau_event_get(disp->vblank[head]); 58 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
58 return 0; 59 if (nv_crtc->index == head) {
60 nvif_notify_get(&nv_crtc->vblank);
61 return 0;
62 }
59 } 63 }
60 return -EIO; 64 return -EINVAL;
61} 65}
62 66
63void 67void
64nouveau_display_vblank_disable(struct drm_device *dev, int head) 68nouveau_display_vblank_disable(struct drm_device *dev, int head)
65{ 69{
66 struct nouveau_display *disp = nouveau_display(dev); 70 struct drm_crtc *crtc;
67 if (disp) 71 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
68 nouveau_event_put(disp->vblank[head]); 72 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
73 if (nv_crtc->index == head) {
74 nvif_notify_put(&nv_crtc->vblank);
75 return;
76 }
77 }
69} 78}
70 79
71static inline int 80static inline int
@@ -86,17 +95,22 @@ int
86nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, 95nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
87 ktime_t *stime, ktime_t *etime) 96 ktime_t *stime, ktime_t *etime)
88{ 97{
89 const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index; 98 struct {
99 struct nv04_disp_mthd_v0 base;
100 struct nv04_disp_scanoutpos_v0 scan;
101 } args = {
102 .base.method = NV04_DISP_SCANOUTPOS,
103 .base.head = nouveau_crtc(crtc)->index,
104 };
90 struct nouveau_display *disp = nouveau_display(crtc->dev); 105 struct nouveau_display *disp = nouveau_display(crtc->dev);
91 struct nv04_display_scanoutpos args;
92 int ret, retry = 1; 106 int ret, retry = 1;
93 107
94 do { 108 do {
95 ret = nv_exec(disp->core, mthd, &args, sizeof(args)); 109 ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args));
96 if (ret != 0) 110 if (ret != 0)
97 return 0; 111 return 0;
98 112
99 if (args.vline) { 113 if (args.scan.vline) {
100 ret |= DRM_SCANOUTPOS_ACCURATE; 114 ret |= DRM_SCANOUTPOS_ACCURATE;
101 ret |= DRM_SCANOUTPOS_VALID; 115 ret |= DRM_SCANOUTPOS_VALID;
102 break; 116 break;
@@ -105,10 +119,11 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
105 if (retry) ndelay(crtc->linedur_ns); 119 if (retry) ndelay(crtc->linedur_ns);
106 } while (retry--); 120 } while (retry--);
107 121
108 *hpos = args.hline; 122 *hpos = args.scan.hline;
109 *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline); 123 *vpos = calc(args.scan.vblanks, args.scan.vblanke,
110 if (stime) *stime = ns_to_ktime(args.time[0]); 124 args.scan.vtotal, args.scan.vline);
111 if (etime) *etime = ns_to_ktime(args.time[1]); 125 if (stime) *stime = ns_to_ktime(args.scan.time[0]);
126 if (etime) *etime = ns_to_ktime(args.scan.time[1]);
112 127
113 if (*vpos < 0) 128 if (*vpos < 0)
114 ret |= DRM_SCANOUTPOS_INVBL; 129 ret |= DRM_SCANOUTPOS_INVBL;
@@ -151,16 +166,13 @@ nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error,
151static void 166static void
152nouveau_display_vblank_fini(struct drm_device *dev) 167nouveau_display_vblank_fini(struct drm_device *dev)
153{ 168{
154 struct nouveau_display *disp = nouveau_display(dev); 169 struct drm_crtc *crtc;
155 int i;
156 170
157 drm_vblank_cleanup(dev); 171 drm_vblank_cleanup(dev);
158 172
159 if (disp->vblank) { 173 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
160 for (i = 0; i < dev->mode_config.num_crtc; i++) 174 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
161 nouveau_event_ref(NULL, &disp->vblank[i]); 175 nvif_notify_fini(&nv_crtc->vblank);
162 kfree(disp->vblank);
163 disp->vblank = NULL;
164 } 176 }
165} 177}
166 178
@@ -168,19 +180,20 @@ static int
168nouveau_display_vblank_init(struct drm_device *dev) 180nouveau_display_vblank_init(struct drm_device *dev)
169{ 181{
170 struct nouveau_display *disp = nouveau_display(dev); 182 struct nouveau_display *disp = nouveau_display(dev);
171 struct nouveau_drm *drm = nouveau_drm(dev); 183 struct drm_crtc *crtc;
172 struct nouveau_disp *pdisp = nouveau_disp(drm->device); 184 int ret;
173 int ret, i;
174
175 disp->vblank = kzalloc(dev->mode_config.num_crtc *
176 sizeof(*disp->vblank), GFP_KERNEL);
177 if (!disp->vblank)
178 return -ENOMEM;
179 185
180 for (i = 0; i < dev->mode_config.num_crtc; i++) { 186 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
181 ret = nouveau_event_new(pdisp->vblank, 1, i, 187 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
182 nouveau_display_vblank_handler, 188 ret = nvif_notify_init(&disp->disp, NULL,
183 drm, &disp->vblank[i]); 189 nouveau_display_vblank_handler, false,
190 NV04_DISP_NTFY_VBLANK,
191 &(struct nvif_notify_head_req_v0) {
192 .head = nv_crtc->index,
193 },
194 sizeof(struct nvif_notify_head_req_v0),
195 sizeof(struct nvif_notify_head_rep_v0),
196 &nv_crtc->vblank);
184 if (ret) { 197 if (ret) {
185 nouveau_display_vblank_fini(dev); 198 nouveau_display_vblank_fini(dev);
186 return ret; 199 return ret;
@@ -200,6 +213,10 @@ static void
200nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) 213nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
201{ 214{
202 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 215 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
216 struct nouveau_display *disp = nouveau_display(drm_fb->dev);
217
218 if (disp->fb_dtor)
219 disp->fb_dtor(drm_fb);
203 220
204 if (fb->nvbo) 221 if (fb->nvbo)
205 drm_gem_object_unreference_unlocked(&fb->nvbo->gem); 222 drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
@@ -229,63 +246,24 @@ nouveau_framebuffer_init(struct drm_device *dev,
229 struct drm_mode_fb_cmd2 *mode_cmd, 246 struct drm_mode_fb_cmd2 *mode_cmd,
230 struct nouveau_bo *nvbo) 247 struct nouveau_bo *nvbo)
231{ 248{
232 struct nouveau_drm *drm = nouveau_drm(dev); 249 struct nouveau_display *disp = nouveau_display(dev);
233 struct drm_framebuffer *fb = &nv_fb->base; 250 struct drm_framebuffer *fb = &nv_fb->base;
234 int ret; 251 int ret;
235 252
236 drm_helper_mode_fill_fb_struct(fb, mode_cmd); 253 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
237 nv_fb->nvbo = nvbo; 254 nv_fb->nvbo = nvbo;
238 255
239 if (nv_device(drm->device)->card_type >= NV_50) {
240 u32 tile_flags = nouveau_bo_tile_layout(nvbo);
241 if (tile_flags == 0x7a00 ||
242 tile_flags == 0xfe00)
243 nv_fb->r_dma = NvEvoFB32;
244 else
245 if (tile_flags == 0x7000)
246 nv_fb->r_dma = NvEvoFB16;
247 else
248 nv_fb->r_dma = NvEvoVRAM_LP;
249
250 switch (fb->depth) {
251 case 8: nv_fb->r_format = 0x1e00; break;
252 case 15: nv_fb->r_format = 0xe900; break;
253 case 16: nv_fb->r_format = 0xe800; break;
254 case 24:
255 case 32: nv_fb->r_format = 0xcf00; break;
256 case 30: nv_fb->r_format = 0xd100; break;
257 default:
258 NV_ERROR(drm, "unknown depth %d\n", fb->depth);
259 return -EINVAL;
260 }
261
262 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
263 NV_ERROR(drm, "framebuffer requires contiguous bo\n");
264 return -EINVAL;
265 }
266
267 if (nv_device(drm->device)->chipset == 0x50)
268 nv_fb->r_format |= (tile_flags << 8);
269
270 if (!tile_flags) {
271 if (nv_device(drm->device)->card_type < NV_D0)
272 nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
273 else
274 nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
275 } else {
276 u32 mode = nvbo->tile_mode;
277 if (nv_device(drm->device)->card_type >= NV_C0)
278 mode >>= 4;
279 nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
280 }
281 }
282
283 ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs); 256 ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
284 if (ret) { 257 if (ret)
285 return ret; 258 return ret;
259
260 if (disp->fb_ctor) {
261 ret = disp->fb_ctor(fb);
262 if (ret)
263 disp->fb_dtor(fb);
286 } 264 }
287 265
288 return 0; 266 return ret;
289} 267}
290 268
291static struct drm_framebuffer * 269static struct drm_framebuffer *
@@ -393,7 +371,7 @@ nouveau_display_init(struct drm_device *dev)
393 /* enable hotplug interrupts */ 371 /* enable hotplug interrupts */
394 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 372 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
395 struct nouveau_connector *conn = nouveau_connector(connector); 373 struct nouveau_connector *conn = nouveau_connector(connector);
396 if (conn->hpd) nouveau_event_get(conn->hpd); 374 nvif_notify_get(&conn->hpd);
397 } 375 }
398 376
399 return ret; 377 return ret;
@@ -404,37 +382,32 @@ nouveau_display_fini(struct drm_device *dev)
404{ 382{
405 struct nouveau_display *disp = nouveau_display(dev); 383 struct nouveau_display *disp = nouveau_display(dev);
406 struct drm_connector *connector; 384 struct drm_connector *connector;
385 int head;
386
387 /* Make sure that drm and hw vblank irqs get properly disabled. */
388 for (head = 0; head < dev->mode_config.num_crtc; head++)
389 drm_vblank_off(dev, head);
407 390
408 /* disable hotplug interrupts */ 391 /* disable hotplug interrupts */
409 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 392 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
410 struct nouveau_connector *conn = nouveau_connector(connector); 393 struct nouveau_connector *conn = nouveau_connector(connector);
411 if (conn->hpd) nouveau_event_put(conn->hpd); 394 nvif_notify_put(&conn->hpd);
412 } 395 }
413 396
414 drm_kms_helper_poll_disable(dev); 397 drm_kms_helper_poll_disable(dev);
415 disp->fini(dev); 398 disp->fini(dev);
416} 399}
417 400
418int 401static void
419nouveau_display_create(struct drm_device *dev) 402nouveau_display_create_properties(struct drm_device *dev)
420{ 403{
421 struct nouveau_drm *drm = nouveau_drm(dev); 404 struct nouveau_display *disp = nouveau_display(dev);
422 struct nouveau_device *device = nouveau_dev(dev); 405 int gen;
423 struct nouveau_display *disp;
424 int ret, gen;
425
426 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
427 if (!disp)
428 return -ENOMEM;
429
430 drm_mode_config_init(dev);
431 drm_mode_create_scaling_mode_property(dev);
432 drm_mode_create_dvi_i_properties(dev);
433 406
434 if (nv_device(drm->device)->card_type < NV_50) 407 if (disp->disp.oclass < NV50_DISP)
435 gen = 0; 408 gen = 0;
436 else 409 else
437 if (nv_device(drm->device)->card_type < NV_D0) 410 if (disp->disp.oclass < GF110_DISP)
438 gen = 1; 411 gen = 1;
439 else 412 else
440 gen = 2; 413 gen = 2;
@@ -449,26 +422,43 @@ nouveau_display_create(struct drm_device *dev)
449 disp->underscan_vborder_property = 422 disp->underscan_vborder_property =
450 drm_property_create_range(dev, 0, "underscan vborder", 0, 128); 423 drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
451 424
452 if (gen >= 1) { 425 if (gen < 1)
453 /* -90..+90 */ 426 return;
454 disp->vibrant_hue_property =
455 drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
456 427
457 /* -100..+100 */ 428 /* -90..+90 */
458 disp->color_vibrance_property = 429 disp->vibrant_hue_property =
459 drm_property_create_range(dev, 0, "color vibrance", 0, 200); 430 drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
460 } 431
432 /* -100..+100 */
433 disp->color_vibrance_property =
434 drm_property_create_range(dev, 0, "color vibrance", 0, 200);
435}
436
437int
438nouveau_display_create(struct drm_device *dev)
439{
440 struct nouveau_drm *drm = nouveau_drm(dev);
441 struct nouveau_display *disp;
442 int ret;
443
444 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
445 if (!disp)
446 return -ENOMEM;
447
448 drm_mode_config_init(dev);
449 drm_mode_create_scaling_mode_property(dev);
450 drm_mode_create_dvi_i_properties(dev);
461 451
462 dev->mode_config.funcs = &nouveau_mode_config_funcs; 452 dev->mode_config.funcs = &nouveau_mode_config_funcs;
463 dev->mode_config.fb_base = nv_device_resource_start(device, 1); 453 dev->mode_config.fb_base = nv_device_resource_start(nvkm_device(&drm->device), 1);
464 454
465 dev->mode_config.min_width = 0; 455 dev->mode_config.min_width = 0;
466 dev->mode_config.min_height = 0; 456 dev->mode_config.min_height = 0;
467 if (nv_device(drm->device)->card_type < NV_10) { 457 if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
468 dev->mode_config.max_width = 2048; 458 dev->mode_config.max_width = 2048;
469 dev->mode_config.max_height = 2048; 459 dev->mode_config.max_height = 2048;
470 } else 460 } else
471 if (nv_device(drm->device)->card_type < NV_50) { 461 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
472 dev->mode_config.max_width = 4096; 462 dev->mode_config.max_width = 4096;
473 dev->mode_config.max_height = 4096; 463 dev->mode_config.max_height = 4096;
474 } else { 464 } else {
@@ -479,7 +469,7 @@ nouveau_display_create(struct drm_device *dev)
479 dev->mode_config.preferred_depth = 24; 469 dev->mode_config.preferred_depth = 24;
480 dev->mode_config.prefer_shadow = 1; 470 dev->mode_config.prefer_shadow = 1;
481 471
482 if (nv_device(drm->device)->chipset < 0x11) 472 if (drm->device.info.chipset < 0x11)
483 dev->mode_config.async_page_flip = false; 473 dev->mode_config.async_page_flip = false;
484 else 474 else
485 dev->mode_config.async_page_flip = true; 475 dev->mode_config.async_page_flip = true;
@@ -487,29 +477,30 @@ nouveau_display_create(struct drm_device *dev)
487 drm_kms_helper_poll_init(dev); 477 drm_kms_helper_poll_init(dev);
488 drm_kms_helper_poll_disable(dev); 478 drm_kms_helper_poll_disable(dev);
489 479
490 if (drm->vbios.dcb.entries) { 480 if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
491 static const u16 oclass[] = { 481 static const u16 oclass[] = {
492 GM107_DISP_CLASS, 482 GM107_DISP,
493 NVF0_DISP_CLASS, 483 GK110_DISP,
494 NVE0_DISP_CLASS, 484 GK104_DISP,
495 NVD0_DISP_CLASS, 485 GF110_DISP,
496 NVA3_DISP_CLASS, 486 GT214_DISP,
497 NV94_DISP_CLASS, 487 GT206_DISP,
498 NVA0_DISP_CLASS, 488 GT200_DISP,
499 NV84_DISP_CLASS, 489 G82_DISP,
500 NV50_DISP_CLASS, 490 NV50_DISP,
501 NV04_DISP_CLASS, 491 NV04_DISP,
502 }; 492 };
503 int i; 493 int i;
504 494
505 for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { 495 for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
506 ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 496 ret = nvif_object_init(nvif_object(&drm->device), NULL,
507 NVDRM_DISPLAY, oclass[i], 497 NVDRM_DISPLAY, oclass[i],
508 NULL, 0, &disp->core); 498 NULL, 0, &disp->disp);
509 } 499 }
510 500
511 if (ret == 0) { 501 if (ret == 0) {
512 if (nv_mclass(disp->core) < NV50_DISP_CLASS) 502 nouveau_display_create_properties(dev);
503 if (disp->disp.oclass < NV50_DISP)
513 ret = nv04_display_create(dev); 504 ret = nv04_display_create(dev);
514 else 505 else
515 ret = nv50_display_create(dev); 506 ret = nv50_display_create(dev);
@@ -542,7 +533,6 @@ void
542nouveau_display_destroy(struct drm_device *dev) 533nouveau_display_destroy(struct drm_device *dev)
543{ 534{
544 struct nouveau_display *disp = nouveau_display(dev); 535 struct nouveau_display *disp = nouveau_display(dev);
545 struct nouveau_drm *drm = nouveau_drm(dev);
546 536
547 nouveau_backlight_exit(dev); 537 nouveau_backlight_exit(dev);
548 nouveau_display_vblank_fini(dev); 538 nouveau_display_vblank_fini(dev);
@@ -553,7 +543,7 @@ nouveau_display_destroy(struct drm_device *dev)
553 if (disp->dtor) 543 if (disp->dtor)
554 disp->dtor(dev); 544 disp->dtor(dev);
555 545
556 nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY); 546 nvif_object_fini(&disp->disp);
557 547
558 nouveau_drm(dev)->display = NULL; 548 nouveau_drm(dev)->display = NULL;
559 kfree(disp); 549 kfree(disp);
@@ -620,6 +610,8 @@ void
620nouveau_display_resume(struct drm_device *dev) 610nouveau_display_resume(struct drm_device *dev)
621{ 611{
622 struct drm_crtc *crtc; 612 struct drm_crtc *crtc;
613 int head;
614
623 nouveau_display_init(dev); 615 nouveau_display_init(dev);
624 616
625 /* Force CLUT to get re-loaded during modeset */ 617 /* Force CLUT to get re-loaded during modeset */
@@ -629,6 +621,10 @@ nouveau_display_resume(struct drm_device *dev)
629 nv_crtc->lut.depth = 0; 621 nv_crtc->lut.depth = 0;
630 } 622 }
631 623
624 /* Make sure that drm and hw vblank irqs get resumed if needed. */
625 for (head = 0; head < dev->mode_config.num_crtc; head++)
626 drm_vblank_on(dev, head);
627
632 drm_helper_resume_force_mode(dev); 628 drm_helper_resume_force_mode(dev);
633 629
634 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 630 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -669,7 +665,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
669 if (ret) 665 if (ret)
670 goto fail; 666 goto fail;
671 667
672 if (nv_device(drm->device)->card_type < NV_C0) 668 if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
673 BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); 669 BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
674 else 670 else
675 BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1); 671 BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
@@ -698,12 +694,15 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
698 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo; 694 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
699 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; 695 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
700 struct nouveau_page_flip_state *s; 696 struct nouveau_page_flip_state *s;
701 struct nouveau_channel *chan = drm->channel; 697 struct nouveau_channel *chan;
698 struct nouveau_cli *cli;
702 struct nouveau_fence *fence; 699 struct nouveau_fence *fence;
703 int ret; 700 int ret;
704 701
705 if (!drm->channel) 702 chan = drm->channel;
703 if (!chan)
706 return -ENODEV; 704 return -ENODEV;
705 cli = (void *)nvif_client(&chan->device->base);
707 706
708 s = kzalloc(sizeof(*s), GFP_KERNEL); 707 s = kzalloc(sizeof(*s), GFP_KERNEL);
709 if (!s) 708 if (!s)
@@ -715,7 +714,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
715 goto fail_free; 714 goto fail_free;
716 } 715 }
717 716
718 mutex_lock(&chan->cli->mutex); 717 mutex_lock(&cli->mutex);
719 718
720 /* synchronise rendering channel with the kernel's channel */ 719 /* synchronise rendering channel with the kernel's channel */
721 spin_lock(&new_bo->bo.bdev->fence_lock); 720 spin_lock(&new_bo->bo.bdev->fence_lock);
@@ -740,7 +739,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
740 drm_vblank_get(dev, nouveau_crtc(crtc)->index); 739 drm_vblank_get(dev, nouveau_crtc(crtc)->index);
741 740
742 /* Emit a page flip */ 741 /* Emit a page flip */
743 if (nv_device(drm->device)->card_type >= NV_50) { 742 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
744 ret = nv50_display_flip_next(crtc, fb, chan, swap_interval); 743 ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
745 if (ret) 744 if (ret)
746 goto fail_unreserve; 745 goto fail_unreserve;
@@ -769,7 +768,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
769 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); 768 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
770 if (ret) 769 if (ret)
771 goto fail_unreserve; 770 goto fail_unreserve;
772 mutex_unlock(&chan->cli->mutex); 771 mutex_unlock(&cli->mutex);
773 772
774 /* Update the crtc struct and cleanup */ 773 /* Update the crtc struct and cleanup */
775 crtc->primary->fb = fb; 774 crtc->primary->fb = fb;
@@ -785,7 +784,7 @@ fail_unreserve:
785 drm_vblank_put(dev, nouveau_crtc(crtc)->index); 784 drm_vblank_put(dev, nouveau_crtc(crtc)->index);
786 ttm_bo_unreserve(&old_bo->bo); 785 ttm_bo_unreserve(&old_bo->bo);
787fail_unpin: 786fail_unpin:
788 mutex_unlock(&chan->cli->mutex); 787 mutex_unlock(&cli->mutex);
789 if (old_bo != new_bo) 788 if (old_bo != new_bo)
790 nouveau_bo_unpin(new_bo); 789 nouveau_bo_unpin(new_bo);
791fail_free: 790fail_free:
@@ -815,7 +814,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
815 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); 814 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
816 if (s->event) { 815 if (s->event) {
817 /* Vblank timestamps/counts are only correct on >= NV-50 */ 816 /* Vblank timestamps/counts are only correct on >= NV-50 */
818 if (nv_device(drm->device)->card_type >= NV_50) 817 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
819 crtcid = s->crtc; 818 crtcid = s->crtc;
820 819
821 drm_send_vblank_event(dev, crtcid, s->event); 820 drm_send_vblank_event(dev, crtcid, s->event);
@@ -841,7 +840,7 @@ nouveau_flip_complete(void *data)
841 struct nouveau_page_flip_state state; 840 struct nouveau_page_flip_state state;
842 841
843 if (!nouveau_finish_page_flip(chan, &state)) { 842 if (!nouveau_finish_page_flip(chan, &state)) {
844 if (nv_device(drm->device)->card_type < NV_50) { 843 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
845 nv_set_crtc_base(drm->dev, state.crtc, state.offset + 844 nv_set_crtc_base(drm->dev, state.crtc, state.offset +
846 state.y * state.pitch + 845 state.y * state.pitch +
847 state.x * state.bpp / 8); 846 state.x * state.bpp / 8);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index a71cf77e55b2..88ca177cb1c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -9,9 +9,11 @@ struct nouveau_framebuffer {
9 struct drm_framebuffer base; 9 struct drm_framebuffer base;
10 struct nouveau_bo *nvbo; 10 struct nouveau_bo *nvbo;
11 struct nouveau_vma vma; 11 struct nouveau_vma vma;
12 u32 r_dma; 12 u32 r_handle;
13 u32 r_format; 13 u32 r_format;
14 u32 r_pitch; 14 u32 r_pitch;
15 struct nvif_object h_base[4];
16 struct nvif_object h_core;
15}; 17};
16 18
17static inline struct nouveau_framebuffer * 19static inline struct nouveau_framebuffer *
@@ -36,8 +38,10 @@ struct nouveau_display {
36 int (*init)(struct drm_device *); 38 int (*init)(struct drm_device *);
37 void (*fini)(struct drm_device *); 39 void (*fini)(struct drm_device *);
38 40
39 struct nouveau_object *core; 41 int (*fb_ctor)(struct drm_framebuffer *);
40 struct nouveau_eventh **vblank; 42 void (*fb_dtor)(struct drm_framebuffer *);
43
44 struct nvif_object disp;
41 45
42 struct drm_property *dithering_mode; 46 struct drm_property *dithering_mode;
43 struct drm_property *dithering_depth; 47 struct drm_property *dithering_depth;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index c177272152e2..8508603cc8c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,8 +24,6 @@
24 * 24 *
25 */ 25 */
26 26
27#include <core/client.h>
28
29#include "nouveau_drm.h" 27#include "nouveau_drm.h"
30#include "nouveau_dma.h" 28#include "nouveau_dma.h"
31 29
@@ -54,9 +52,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
54{ 52{
55 uint64_t val; 53 uint64_t val;
56 54
57 val = nv_ro32(chan->object, chan->user_get); 55 val = nvif_rd32(chan, chan->user_get);
58 if (chan->user_get_hi) 56 if (chan->user_get_hi)
59 val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32; 57 val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32;
60 58
61 /* reset counter as long as GET is still advancing, this is 59 /* reset counter as long as GET is still advancing, this is
62 * to avoid misdetecting a GPU lockup if the GPU happens to 60 * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -84,12 +82,13 @@ void
84nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, 82nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
85 int delta, int length) 83 int delta, int length)
86{ 84{
85 struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
87 struct nouveau_bo *pb = chan->push.buffer; 86 struct nouveau_bo *pb = chan->push.buffer;
88 struct nouveau_vma *vma; 87 struct nouveau_vma *vma;
89 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; 88 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
90 u64 offset; 89 u64 offset;
91 90
92 vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm); 91 vma = nouveau_bo_vma_find(bo, cli->vm);
93 BUG_ON(!vma); 92 BUG_ON(!vma);
94 offset = vma->offset + delta; 93 offset = vma->offset + delta;
95 94
@@ -104,7 +103,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
104 /* Flush writes. */ 103 /* Flush writes. */
105 nouveau_bo_rd32(pb, 0); 104 nouveau_bo_rd32(pb, 0);
106 105
107 nv_wo32(chan->object, 0x8c, chan->dma.ib_put); 106 nvif_wr32(chan, 0x8c, chan->dma.ib_put);
108 chan->dma.ib_free--; 107 chan->dma.ib_free--;
109} 108}
110 109
@@ -114,7 +113,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
114 uint32_t cnt = 0, prev_get = 0; 113 uint32_t cnt = 0, prev_get = 0;
115 114
116 while (chan->dma.ib_free < count) { 115 while (chan->dma.ib_free < count) {
117 uint32_t get = nv_ro32(chan->object, 0x88); 116 uint32_t get = nvif_rd32(chan, 0x88);
118 if (get != prev_get) { 117 if (get != prev_get) {
119 prev_get = get; 118 prev_get = get;
120 cnt = 0; 119 cnt = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index dc0e0c5cadb4..8da0a272c45a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -58,31 +58,14 @@ enum {
58 FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */ 58 FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
59}; 59};
60 60
61/* Object handles. */ 61/* Object handles - for stuff that's doesn't use handle == oclass. */
62enum { 62enum {
63 NvM2MF = 0x80000001,
64 NvDmaFB = 0x80000002, 63 NvDmaFB = 0x80000002,
65 NvDmaTT = 0x80000003, 64 NvDmaTT = 0x80000003,
66 NvNotify0 = 0x80000006, 65 NvNotify0 = 0x80000006,
67 Nv2D = 0x80000007,
68 NvCtxSurf2D = 0x80000008,
69 NvRop = 0x80000009,
70 NvImagePatt = 0x8000000a,
71 NvClipRect = 0x8000000b,
72 NvGdiRect = 0x8000000c,
73 NvImageBlit = 0x8000000d,
74 NvSw = 0x8000000e,
75 NvSema = 0x8000000f, 66 NvSema = 0x8000000f,
76 NvEvoSema0 = 0x80000010, 67 NvEvoSema0 = 0x80000010,
77 NvEvoSema1 = 0x80000011, 68 NvEvoSema1 = 0x80000011,
78 NvNotify1 = 0x80000012,
79
80 /* G80+ display objects */
81 NvEvoVRAM = 0x01000000,
82 NvEvoFB16 = 0x01000001,
83 NvEvoFB32 = 0x01000002,
84 NvEvoVRAM_LP = 0x01000003,
85 NvEvoSync = 0xcafe0000
86}; 69};
87 70
88#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 71#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
@@ -157,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
157#define WRITE_PUT(val) do { \ 140#define WRITE_PUT(val) do { \
158 mb(); \ 141 mb(); \
159 nouveau_bo_rd32(chan->push.buffer, 0); \ 142 nouveau_bo_rd32(chan->push.buffer, 0); \
160 nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ 143 nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
161} while (0) 144} while (0)
162 145
163static inline void 146static inline void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 5675ffc175ae..c5137cccce7d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,11 +30,6 @@
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32 32
33#include <core/class.h>
34
35#include <subdev/gpio.h>
36#include <subdev/i2c.h>
37
38static void 33static void
39nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch, 34nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
40 u8 *dpcd) 35 u8 *dpcd)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c9428c943afb..250a5e88c751 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -27,21 +27,14 @@
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
29#include <linux/vga_switcheroo.h> 29#include <linux/vga_switcheroo.h>
30
30#include "drmP.h" 31#include "drmP.h"
31#include "drm_crtc_helper.h" 32#include "drm_crtc_helper.h"
33
32#include <core/device.h> 34#include <core/device.h>
33#include <core/client.h>
34#include <core/gpuobj.h> 35#include <core/gpuobj.h>
35#include <core/class.h>
36#include <core/option.h> 36#include <core/option.h>
37 37
38#include <engine/device.h>
39#include <engine/disp.h>
40#include <engine/fifo.h>
41#include <engine/software.h>
42
43#include <subdev/vm.h>
44
45#include "nouveau_drm.h" 38#include "nouveau_drm.h"
46#include "nouveau_dma.h" 39#include "nouveau_dma.h"
47#include "nouveau_ttm.h" 40#include "nouveau_ttm.h"
@@ -57,6 +50,7 @@
57#include "nouveau_fbcon.h" 50#include "nouveau_fbcon.h"
58#include "nouveau_fence.h" 51#include "nouveau_fence.h"
59#include "nouveau_debugfs.h" 52#include "nouveau_debugfs.h"
53#include "nouveau_usif.h"
60 54
61MODULE_PARM_DESC(config, "option string to pass to driver core"); 55MODULE_PARM_DESC(config, "option string to pass to driver core");
62static char *nouveau_config; 56static char *nouveau_config;
@@ -109,40 +103,37 @@ static int
109nouveau_cli_create(u64 name, const char *sname, 103nouveau_cli_create(u64 name, const char *sname,
110 int size, void **pcli) 104 int size, void **pcli)
111{ 105{
112 struct nouveau_cli *cli; 106 struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
113 int ret; 107 if (cli) {
114 108 int ret = nvif_client_init(NULL, NULL, sname, name,
115 *pcli = NULL; 109 nouveau_config, nouveau_debug,
116 ret = nouveau_client_create_(sname, name, nouveau_config, 110 &cli->base);
117 nouveau_debug, size, pcli); 111 if (ret == 0) {
118 cli = *pcli; 112 mutex_init(&cli->mutex);
119 if (ret) { 113 usif_client_init(cli);
120 if (cli) 114 }
121 nouveau_client_destroy(&cli->base);
122 *pcli = NULL;
123 return ret; 115 return ret;
124 } 116 }
125 117 return -ENOMEM;
126 mutex_init(&cli->mutex);
127 return 0;
128} 118}
129 119
130static void 120static void
131nouveau_cli_destroy(struct nouveau_cli *cli) 121nouveau_cli_destroy(struct nouveau_cli *cli)
132{ 122{
133 struct nouveau_object *client = nv_object(cli); 123 nouveau_vm_ref(NULL, &nvkm_client(&cli->base)->vm, NULL);
134 nouveau_vm_ref(NULL, &cli->base.vm, NULL); 124 nvif_client_fini(&cli->base);
135 nouveau_client_fini(&cli->base, false); 125 usif_client_fini(cli);
136 atomic_set(&client->refcount, 1);
137 nouveau_object_ref(NULL, &client);
138} 126}
139 127
140static void 128static void
141nouveau_accel_fini(struct nouveau_drm *drm) 129nouveau_accel_fini(struct nouveau_drm *drm)
142{ 130{
143 nouveau_gpuobj_ref(NULL, &drm->notify);
144 nouveau_channel_del(&drm->channel); 131 nouveau_channel_del(&drm->channel);
132 nvif_object_fini(&drm->ntfy);
133 nouveau_gpuobj_ref(NULL, &drm->notify);
134 nvif_object_fini(&drm->nvsw);
145 nouveau_channel_del(&drm->cechan); 135 nouveau_channel_del(&drm->cechan);
136 nvif_object_fini(&drm->ttm.copy);
146 if (drm->fence) 137 if (drm->fence)
147 nouveau_fence(drm)->dtor(drm); 138 nouveau_fence(drm)->dtor(drm);
148} 139}
@@ -150,46 +141,71 @@ nouveau_accel_fini(struct nouveau_drm *drm)
150static void 141static void
151nouveau_accel_init(struct nouveau_drm *drm) 142nouveau_accel_init(struct nouveau_drm *drm)
152{ 143{
153 struct nouveau_device *device = nv_device(drm->device); 144 struct nvif_device *device = &drm->device;
154 struct nouveau_object *object;
155 u32 arg0, arg1; 145 u32 arg0, arg1;
156 int ret; 146 u32 sclass[16];
147 int ret, i;
157 148
158 if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/) 149 if (nouveau_noaccel)
159 return; 150 return;
160 151
161 /* initialise synchronisation routines */ 152 /* initialise synchronisation routines */
162 if (device->card_type < NV_10) ret = nv04_fence_create(drm); 153 /*XXX: this is crap, but the fence/channel stuff is a little
163 else if (device->card_type < NV_11 || 154 * backwards in some places. this will be fixed.
164 device->chipset < 0x17) ret = nv10_fence_create(drm); 155 */
165 else if (device->card_type < NV_50) ret = nv17_fence_create(drm); 156 ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass));
166 else if (device->chipset < 0x84) ret = nv50_fence_create(drm); 157 if (ret < 0)
167 else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); 158 return;
168 else ret = nvc0_fence_create(drm); 159
160 for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) {
161 switch (sclass[i]) {
162 case NV03_CHANNEL_DMA:
163 ret = nv04_fence_create(drm);
164 break;
165 case NV10_CHANNEL_DMA:
166 ret = nv10_fence_create(drm);
167 break;
168 case NV17_CHANNEL_DMA:
169 case NV40_CHANNEL_DMA:
170 ret = nv17_fence_create(drm);
171 break;
172 case NV50_CHANNEL_GPFIFO:
173 ret = nv50_fence_create(drm);
174 break;
175 case G82_CHANNEL_GPFIFO:
176 ret = nv84_fence_create(drm);
177 break;
178 case FERMI_CHANNEL_GPFIFO:
179 case KEPLER_CHANNEL_GPFIFO_A:
180 ret = nvc0_fence_create(drm);
181 break;
182 default:
183 break;
184 }
185 }
186
169 if (ret) { 187 if (ret) {
170 NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret); 188 NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
171 nouveau_accel_fini(drm); 189 nouveau_accel_fini(drm);
172 return; 190 return;
173 } 191 }
174 192
175 if (device->card_type >= NV_E0) { 193 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
176 ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, 194 ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
177 NVDRM_CHAN + 1, 195 KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0|
178 NVE0_CHANNEL_IND_ENGINE_CE0 | 196 KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
179 NVE0_CHANNEL_IND_ENGINE_CE1, 0, 197 0, &drm->cechan);
180 &drm->cechan);
181 if (ret) 198 if (ret)
182 NV_ERROR(drm, "failed to create ce channel, %d\n", ret); 199 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
183 200
184 arg0 = NVE0_CHANNEL_IND_ENGINE_GR; 201 arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
185 arg1 = 1; 202 arg1 = 1;
186 } else 203 } else
187 if (device->chipset >= 0xa3 && 204 if (device->info.chipset >= 0xa3 &&
188 device->chipset != 0xaa && 205 device->info.chipset != 0xaa &&
189 device->chipset != 0xac) { 206 device->info.chipset != 0xac) {
190 ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, 207 ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
191 NVDRM_CHAN + 1, NvDmaFB, NvDmaTT, 208 NvDmaFB, NvDmaTT, &drm->cechan);
192 &drm->cechan);
193 if (ret) 209 if (ret)
194 NV_ERROR(drm, "failed to create ce channel, %d\n", ret); 210 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
195 211
@@ -200,30 +216,30 @@ nouveau_accel_init(struct nouveau_drm *drm)
200 arg1 = NvDmaTT; 216 arg1 = NvDmaTT;
201 } 217 }
202 218
203 ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN, 219 ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1,
204 arg0, arg1, &drm->channel); 220 &drm->channel);
205 if (ret) { 221 if (ret) {
206 NV_ERROR(drm, "failed to create kernel channel, %d\n", ret); 222 NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
207 nouveau_accel_fini(drm); 223 nouveau_accel_fini(drm);
208 return; 224 return;
209 } 225 }
210 226
211 ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW, 227 ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
212 nouveau_abi16_swclass(drm), NULL, 0, &object); 228 nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
213 if (ret == 0) { 229 if (ret == 0) {
214 struct nouveau_software_chan *swch = (void *)object->parent; 230 struct nouveau_software_chan *swch;
215 ret = RING_SPACE(drm->channel, 2); 231 ret = RING_SPACE(drm->channel, 2);
216 if (ret == 0) { 232 if (ret == 0) {
217 if (device->card_type < NV_C0) { 233 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
218 BEGIN_NV04(drm->channel, NvSubSw, 0, 1); 234 BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
219 OUT_RING (drm->channel, NVDRM_NVSW); 235 OUT_RING (drm->channel, NVDRM_NVSW);
220 } else 236 } else
221 if (device->card_type < NV_E0) { 237 if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) {
222 BEGIN_NVC0(drm->channel, FermiSw, 0, 1); 238 BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
223 OUT_RING (drm->channel, 0x001f0000); 239 OUT_RING (drm->channel, 0x001f0000);
224 } 240 }
225 } 241 }
226 swch = (void *)object->parent; 242 swch = (void *)nvkm_object(&drm->nvsw)->parent;
227 swch->flip = nouveau_flip_complete; 243 swch->flip = nouveau_flip_complete;
228 swch->flip_data = drm->channel; 244 swch->flip_data = drm->channel;
229 } 245 }
@@ -234,24 +250,24 @@ nouveau_accel_init(struct nouveau_drm *drm)
234 return; 250 return;
235 } 251 }
236 252
237 if (device->card_type < NV_C0) { 253 if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
238 ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0, 254 ret = nouveau_gpuobj_new(nvkm_object(&drm->device), NULL, 32,
239 &drm->notify); 255 0, 0, &drm->notify);
240 if (ret) { 256 if (ret) {
241 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); 257 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
242 nouveau_accel_fini(drm); 258 nouveau_accel_fini(drm);
243 return; 259 return;
244 } 260 }
245 261
246 ret = nouveau_object_new(nv_object(drm), 262 ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
247 drm->channel->handle, NvNotify0, 263 NV_DMA_IN_MEMORY,
248 0x003d, &(struct nv_dma_class) { 264 &(struct nv_dma_v0) {
249 .flags = NV_DMA_TARGET_VRAM | 265 .target = NV_DMA_V0_TARGET_VRAM,
250 NV_DMA_ACCESS_RDWR, 266 .access = NV_DMA_V0_ACCESS_RDWR,
251 .start = drm->notify->addr, 267 .start = drm->notify->addr,
252 .limit = drm->notify->addr + 31 268 .limit = drm->notify->addr + 31
253 }, sizeof(struct nv_dma_class), 269 }, sizeof(struct nv_dma_v0),
254 &object); 270 &drm->ntfy);
255 if (ret) { 271 if (ret) {
256 nouveau_accel_fini(drm); 272 nouveau_accel_fini(drm);
257 return; 273 return;
@@ -294,7 +310,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
294#ifdef CONFIG_X86 310#ifdef CONFIG_X86
295 boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 311 boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
296#endif 312#endif
297 remove_conflicting_framebuffers(aper, "nouveaufb", boot); 313 if (nouveau_modeset != 2)
314 remove_conflicting_framebuffers(aper, "nouveaufb", boot);
298 kfree(aper); 315 kfree(aper);
299 316
300 ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI, 317 ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI,
@@ -348,7 +365,6 @@ static int
348nouveau_drm_load(struct drm_device *dev, unsigned long flags) 365nouveau_drm_load(struct drm_device *dev, unsigned long flags)
349{ 366{
350 struct pci_dev *pdev = dev->pdev; 367 struct pci_dev *pdev = dev->pdev;
351 struct nouveau_device *device;
352 struct nouveau_drm *drm; 368 struct nouveau_drm *drm;
353 int ret; 369 int ret;
354 370
@@ -359,7 +375,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
359 375
360 dev->dev_private = drm; 376 dev->dev_private = drm;
361 drm->dev = dev; 377 drm->dev = dev;
362 nouveau_client(drm)->debug = nouveau_dbgopt(nouveau_debug, "DRM"); 378 nvkm_client(&drm->client.base)->debug =
379 nouveau_dbgopt(nouveau_debug, "DRM");
363 380
364 INIT_LIST_HEAD(&drm->clients); 381 INIT_LIST_HEAD(&drm->clients);
365 spin_lock_init(&drm->tile.lock); 382 spin_lock_init(&drm->tile.lock);
@@ -370,33 +387,34 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
370 * (possibly) execute vbios init tables (see nouveau_agp.h) 387 * (possibly) execute vbios init tables (see nouveau_agp.h)
371 */ 388 */
372 if (pdev && drm_pci_device_is_agp(dev) && dev->agp) { 389 if (pdev && drm_pci_device_is_agp(dev) && dev->agp) {
390 const u64 enables = NV_DEVICE_V0_DISABLE_IDENTIFY |
391 NV_DEVICE_V0_DISABLE_MMIO;
373 /* dummy device object, doesn't init anything, but allows 392 /* dummy device object, doesn't init anything, but allows
374 * agp code access to registers 393 * agp code access to registers
375 */ 394 */
376 ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, 395 ret = nvif_device_init(&drm->client.base.base, NULL,
377 NVDRM_DEVICE, 0x0080, 396 NVDRM_DEVICE, NV_DEVICE,
378 &(struct nv_device_class) { 397 &(struct nv_device_v0) {
379 .device = ~0, 398 .device = ~0,
380 .disable = 399 .disable = ~enables,
381 ~(NV_DEVICE_DISABLE_MMIO |
382 NV_DEVICE_DISABLE_IDENTIFY),
383 .debug0 = ~0, 400 .debug0 = ~0,
384 }, sizeof(struct nv_device_class), 401 }, sizeof(struct nv_device_v0),
385 &drm->device); 402 &drm->device);
386 if (ret) 403 if (ret)
387 goto fail_device; 404 goto fail_device;
388 405
389 nouveau_agp_reset(drm); 406 nouveau_agp_reset(drm);
390 nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE); 407 nvif_device_fini(&drm->device);
391 } 408 }
392 409
393 ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE, 410 ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE,
394 0x0080, &(struct nv_device_class) { 411 NV_DEVICE,
412 &(struct nv_device_v0) {
395 .device = ~0, 413 .device = ~0,
396 .disable = 0, 414 .disable = 0,
397 .debug0 = 0, 415 .debug0 = 0,
398 }, sizeof(struct nv_device_class), 416 }, sizeof(struct nv_device_v0),
399 &drm->device); 417 &drm->device);
400 if (ret) 418 if (ret)
401 goto fail_device; 419 goto fail_device;
402 420
@@ -406,18 +424,19 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
406 * nosnoop capability. hopefully won't cause issues until a 424 * nosnoop capability. hopefully won't cause issues until a
407 * better fix is found - assuming there is one... 425 * better fix is found - assuming there is one...
408 */ 426 */
409 device = nv_device(drm->device); 427 if (drm->device.info.chipset == 0xc1)
410 if (nv_device(drm->device)->chipset == 0xc1) 428 nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000);
411 nv_mask(device, 0x00088080, 0x00000800, 0x00000000);
412 429
413 nouveau_vga_init(drm); 430 nouveau_vga_init(drm);
414 nouveau_agp_init(drm); 431 nouveau_agp_init(drm);
415 432
416 if (device->card_type >= NV_50) { 433 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
417 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), 434 ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40),
418 0x1000, &drm->client.base.vm); 435 0x1000, &drm->client.vm);
419 if (ret) 436 if (ret)
420 goto fail_device; 437 goto fail_device;
438
439 nvkm_client(&drm->client.base)->vm = drm->client.vm;
421 } 440 }
422 441
423 ret = nouveau_ttm_init(drm); 442 ret = nouveau_ttm_init(drm);
@@ -463,6 +482,7 @@ fail_ttm:
463 nouveau_agp_fini(drm); 482 nouveau_agp_fini(drm);
464 nouveau_vga_fini(drm); 483 nouveau_vga_fini(drm);
465fail_device: 484fail_device:
485 nvif_device_fini(&drm->device);
466 nouveau_cli_destroy(&drm->client); 486 nouveau_cli_destroy(&drm->client);
467 return ret; 487 return ret;
468} 488}
@@ -488,26 +508,37 @@ nouveau_drm_unload(struct drm_device *dev)
488 nouveau_agp_fini(drm); 508 nouveau_agp_fini(drm);
489 nouveau_vga_fini(drm); 509 nouveau_vga_fini(drm);
490 510
511 nvif_device_fini(&drm->device);
491 if (drm->hdmi_device) 512 if (drm->hdmi_device)
492 pci_dev_put(drm->hdmi_device); 513 pci_dev_put(drm->hdmi_device);
493 nouveau_cli_destroy(&drm->client); 514 nouveau_cli_destroy(&drm->client);
494 return 0; 515 return 0;
495} 516}
496 517
497static void 518void
498nouveau_drm_remove(struct pci_dev *pdev) 519nouveau_drm_device_remove(struct drm_device *dev)
499{ 520{
500 struct drm_device *dev = pci_get_drvdata(pdev);
501 struct nouveau_drm *drm = nouveau_drm(dev); 521 struct nouveau_drm *drm = nouveau_drm(dev);
522 struct nouveau_client *client;
502 struct nouveau_object *device; 523 struct nouveau_object *device;
503 524
504 dev->irq_enabled = false; 525 dev->irq_enabled = false;
505 device = drm->client.base.device; 526 client = nvkm_client(&drm->client.base);
527 device = client->device;
506 drm_put_dev(dev); 528 drm_put_dev(dev);
507 529
508 nouveau_object_ref(NULL, &device); 530 nouveau_object_ref(NULL, &device);
509 nouveau_object_debug(); 531 nouveau_object_debug();
510} 532}
533EXPORT_SYMBOL(nouveau_drm_device_remove);
534
535static void
536nouveau_drm_remove(struct pci_dev *pdev)
537{
538 struct drm_device *dev = pci_get_drvdata(pdev);
539
540 nouveau_drm_device_remove(dev);
541}
511 542
512static int 543static int
513nouveau_do_suspend(struct drm_device *dev, bool runtime) 544nouveau_do_suspend(struct drm_device *dev, bool runtime)
@@ -548,13 +579,13 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
548 } 579 }
549 580
550 list_for_each_entry(cli, &drm->clients, head) { 581 list_for_each_entry(cli, &drm->clients, head) {
551 ret = nouveau_client_fini(&cli->base, true); 582 ret = nvif_client_suspend(&cli->base);
552 if (ret) 583 if (ret)
553 goto fail_client; 584 goto fail_client;
554 } 585 }
555 586
556 NV_INFO(drm, "suspending kernel object tree...\n"); 587 NV_INFO(drm, "suspending kernel object tree...\n");
557 ret = nouveau_client_fini(&drm->client.base, true); 588 ret = nvif_client_suspend(&drm->client.base);
558 if (ret) 589 if (ret)
559 goto fail_client; 590 goto fail_client;
560 591
@@ -563,7 +594,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
563 594
564fail_client: 595fail_client:
565 list_for_each_entry_continue_reverse(cli, &drm->clients, head) { 596 list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
566 nouveau_client_init(&cli->base); 597 nvif_client_resume(&cli->base);
567 } 598 }
568 599
569 if (drm->fence && nouveau_fence(drm)->resume) 600 if (drm->fence && nouveau_fence(drm)->resume)
@@ -611,7 +642,7 @@ nouveau_do_resume(struct drm_device *dev)
611 nouveau_agp_reset(drm); 642 nouveau_agp_reset(drm);
612 643
613 NV_INFO(drm, "resuming kernel object tree...\n"); 644 NV_INFO(drm, "resuming kernel object tree...\n");
614 nouveau_client_init(&drm->client.base); 645 nvif_client_resume(&drm->client.base);
615 nouveau_agp_init(drm); 646 nouveau_agp_init(drm);
616 647
617 NV_INFO(drm, "resuming client object trees...\n"); 648 NV_INFO(drm, "resuming client object trees...\n");
@@ -619,7 +650,7 @@ nouveau_do_resume(struct drm_device *dev)
619 nouveau_fence(drm)->resume(drm); 650 nouveau_fence(drm)->resume(drm);
620 651
621 list_for_each_entry(cli, &drm->clients, head) { 652 list_for_each_entry(cli, &drm->clients, head) {
622 nouveau_client_init(&cli->base); 653 nvif_client_resume(&cli->base);
623 } 654 }
624 655
625 nouveau_run_vbios_init(dev); 656 nouveau_run_vbios_init(dev);
@@ -715,13 +746,17 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
715 if (ret) 746 if (ret)
716 goto out_suspend; 747 goto out_suspend;
717 748
718 if (nv_device(drm->device)->card_type >= NV_50) { 749 cli->base.super = false;
719 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), 750
720 0x1000, &cli->base.vm); 751 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
752 ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40),
753 0x1000, &cli->vm);
721 if (ret) { 754 if (ret) {
722 nouveau_cli_destroy(cli); 755 nouveau_cli_destroy(cli);
723 goto out_suspend; 756 goto out_suspend;
724 } 757 }
758
759 nvkm_client(&cli->base)->vm = cli->vm;
725 } 760 }
726 761
727 fpriv->driver_priv = cli; 762 fpriv->driver_priv = cli;
@@ -779,24 +814,31 @@ nouveau_ioctls[] = {
779 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), 814 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
780}; 815};
781 816
782long nouveau_drm_ioctl(struct file *filp, 817long
783 unsigned int cmd, unsigned long arg) 818nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
784{ 819{
785 struct drm_file *file_priv = filp->private_data; 820 struct drm_file *filp = file->private_data;
786 struct drm_device *dev; 821 struct drm_device *dev = filp->minor->dev;
787 long ret; 822 long ret;
788 dev = file_priv->minor->dev;
789 823
790 ret = pm_runtime_get_sync(dev->dev); 824 ret = pm_runtime_get_sync(dev->dev);
791 if (ret < 0 && ret != -EACCES) 825 if (ret < 0 && ret != -EACCES)
792 return ret; 826 return ret;
793 827
794 ret = drm_ioctl(filp, cmd, arg); 828 switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
829 case DRM_NOUVEAU_NVIF:
830 ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
831 break;
832 default:
833 ret = drm_ioctl(file, cmd, arg);
834 break;
835 }
795 836
796 pm_runtime_mark_last_busy(dev->dev); 837 pm_runtime_mark_last_busy(dev->dev);
797 pm_runtime_put_autosuspend(dev->dev); 838 pm_runtime_put_autosuspend(dev->dev);
798 return ret; 839 return ret;
799} 840}
841
800static const struct file_operations 842static const struct file_operations
801nouveau_driver_fops = { 843nouveau_driver_fops = {
802 .owner = THIS_MODULE, 844 .owner = THIS_MODULE,
@@ -921,7 +963,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
921{ 963{
922 struct pci_dev *pdev = to_pci_dev(dev); 964 struct pci_dev *pdev = to_pci_dev(dev);
923 struct drm_device *drm_dev = pci_get_drvdata(pdev); 965 struct drm_device *drm_dev = pci_get_drvdata(pdev);
924 struct nouveau_device *device = nouveau_dev(drm_dev); 966 struct nvif_device *device = &nouveau_drm(drm_dev)->device;
925 int ret; 967 int ret;
926 968
927 if (nouveau_runtime_pm == 0) 969 if (nouveau_runtime_pm == 0)
@@ -937,7 +979,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
937 ret = nouveau_do_resume(drm_dev); 979 ret = nouveau_do_resume(drm_dev);
938 drm_kms_helper_poll_enable(drm_dev); 980 drm_kms_helper_poll_enable(drm_dev);
939 /* do magic */ 981 /* do magic */
940 nv_mask(device, 0x88488, (1 << 25), (1 << 25)); 982 nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
941 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); 983 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
942 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; 984 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
943 nv_debug_level(NORMAL); 985 nv_debug_level(NORMAL);
@@ -1005,24 +1047,41 @@ nouveau_drm_pci_driver = {
1005 .driver.pm = &nouveau_pm_ops, 1047 .driver.pm = &nouveau_pm_ops,
1006}; 1048};
1007 1049
1008int nouveau_drm_platform_probe(struct platform_device *pdev) 1050struct drm_device *
1051nouveau_platform_device_create_(struct platform_device *pdev, int size,
1052 void **pobject)
1009{ 1053{
1010 struct nouveau_device *device; 1054 struct drm_device *drm;
1011 int ret; 1055 int err;
1012 1056
1013 ret = nouveau_device_create(pdev, NOUVEAU_BUS_PLATFORM, 1057 err = nouveau_device_create_(pdev, NOUVEAU_BUS_PLATFORM,
1014 nouveau_platform_name(pdev), 1058 nouveau_platform_name(pdev),
1015 dev_name(&pdev->dev), nouveau_config, 1059 dev_name(&pdev->dev), nouveau_config,
1016 nouveau_debug, &device); 1060 nouveau_debug, size, pobject);
1017 1061 if (err)
1018 ret = drm_platform_init(&driver, pdev); 1062 return ERR_PTR(err);
1019 if (ret) { 1063
1020 nouveau_object_ref(NULL, (struct nouveau_object **)&device); 1064 drm = drm_dev_alloc(&driver, &pdev->dev);
1021 return ret; 1065 if (!drm) {
1066 err = -ENOMEM;
1067 goto err_free;
1022 } 1068 }
1023 1069
1024 return ret; 1070 err = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev));
1071 if (err < 0)
1072 goto err_free;
1073
1074 drm->platformdev = pdev;
1075 platform_set_drvdata(pdev, drm);
1076
1077 return drm;
1078
1079err_free:
1080 nouveau_object_ref(NULL, (struct nouveau_object **)pobject);
1081
1082 return ERR_PTR(err);
1025} 1083}
1084EXPORT_SYMBOL(nouveau_platform_device_create_);
1026 1085
1027static int __init 1086static int __init
1028nouveau_drm_init(void) 1087nouveau_drm_init(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 7efbafaf7c1d..b02b02452c85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -9,8 +9,8 @@
9#define DRIVER_DATE "20120801" 9#define DRIVER_DATE "20120801"
10 10
11#define DRIVER_MAJOR 1 11#define DRIVER_MAJOR 1
12#define DRIVER_MINOR 1 12#define DRIVER_MINOR 2
13#define DRIVER_PATCHLEVEL 1 13#define DRIVER_PATCHLEVEL 0
14 14
15/* 15/*
16 * 1.1.1: 16 * 1.1.1:
@@ -21,15 +21,17 @@
21 * to control registers on the MPs to enable performance counters, 21 * to control registers on the MPs to enable performance counters,
22 * and to control the warp error enable mask (OpenGL requires out of 22 * and to control the warp error enable mask (OpenGL requires out of
23 * bounds access to local memory to be silently ignored / return 0). 23 * bounds access to local memory to be silently ignored / return 0).
24 * 1.1.2:
25 * - fixes multiple bugs in flip completion events and timestamping
26 * 1.2.0:
27 * - object api exposed to userspace
28 * - fermi,kepler,maxwell zbc
24 */ 29 */
25 30
26#include <core/client.h> 31#include <nvif/client.h>
27#include <core/event.h> 32#include <nvif/device.h>
28
29#include <subdev/vm.h>
30 33
31#include <drmP.h> 34#include <drmP.h>
32#include <drm/nouveau_drm.h>
33 35
34#include <drm/ttm/ttm_bo_api.h> 36#include <drm/ttm/ttm_bo_api.h>
35#include <drm/ttm/ttm_bo_driver.h> 37#include <drm/ttm/ttm_bo_driver.h>
@@ -38,7 +40,10 @@
38#include <drm/ttm/ttm_module.h> 40#include <drm/ttm/ttm_module.h>
39#include <drm/ttm/ttm_page_alloc.h> 41#include <drm/ttm/ttm_page_alloc.h>
40 42
43#include "uapi/drm/nouveau_drm.h"
44
41struct nouveau_channel; 45struct nouveau_channel;
46struct platform_device;
42 47
43#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) 48#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
44 49
@@ -50,6 +55,17 @@ struct nouveau_drm_tile {
50 bool used; 55 bool used;
51}; 56};
52 57
58enum nouveau_drm_object_route {
59 NVDRM_OBJECT_NVIF = 0,
60 NVDRM_OBJECT_USIF,
61 NVDRM_OBJECT_ABI16,
62};
63
64enum nouveau_drm_notify_route {
65 NVDRM_NOTIFY_NVIF = 0,
66 NVDRM_NOTIFY_USIF
67};
68
53enum nouveau_drm_handle { 69enum nouveau_drm_handle {
54 NVDRM_CLIENT = 0xffffffff, 70 NVDRM_CLIENT = 0xffffffff,
55 NVDRM_DEVICE = 0xdddddddd, 71 NVDRM_DEVICE = 0xdddddddd,
@@ -61,10 +77,13 @@ enum nouveau_drm_handle {
61}; 77};
62 78
63struct nouveau_cli { 79struct nouveau_cli {
64 struct nouveau_client base; 80 struct nvif_client base;
81 struct nouveau_vm *vm; /*XXX*/
65 struct list_head head; 82 struct list_head head;
66 struct mutex mutex; 83 struct mutex mutex;
67 void *abi16; 84 void *abi16;
85 struct list_head objects;
86 struct list_head notifys;
68}; 87};
69 88
70static inline struct nouveau_cli * 89static inline struct nouveau_cli *
@@ -73,13 +92,16 @@ nouveau_cli(struct drm_file *fpriv)
73 return fpriv ? fpriv->driver_priv : NULL; 92 return fpriv ? fpriv->driver_priv : NULL;
74} 93}
75 94
95#include <nvif/object.h>
96#include <nvif/device.h>
97
76extern int nouveau_runtime_pm; 98extern int nouveau_runtime_pm;
77 99
78struct nouveau_drm { 100struct nouveau_drm {
79 struct nouveau_cli client; 101 struct nouveau_cli client;
80 struct drm_device *dev; 102 struct drm_device *dev;
81 103
82 struct nouveau_object *device; 104 struct nvif_device device;
83 struct list_head clients; 105 struct list_head clients;
84 106
85 struct { 107 struct {
@@ -102,6 +124,7 @@ struct nouveau_drm {
102 struct ttm_buffer_object *, 124 struct ttm_buffer_object *,
103 struct ttm_mem_reg *, struct ttm_mem_reg *); 125 struct ttm_mem_reg *, struct ttm_mem_reg *);
104 struct nouveau_channel *chan; 126 struct nouveau_channel *chan;
127 struct nvif_object copy;
105 int mtrr; 128 int mtrr;
106 } ttm; 129 } ttm;
107 130
@@ -119,6 +142,8 @@ struct nouveau_drm {
119 struct nouveau_channel *channel; 142 struct nouveau_channel *channel;
120 struct nouveau_gpuobj *notify; 143 struct nouveau_gpuobj *notify;
121 struct nouveau_fbdev *fbcon; 144 struct nouveau_fbdev *fbcon;
145 struct nvif_object nvsw;
146 struct nvif_object ntfy;
122 147
123 /* nv10-nv40 tiling regions */ 148 /* nv10-nv40 tiling regions */
124 struct { 149 struct {
@@ -148,20 +173,25 @@ nouveau_drm(struct drm_device *dev)
148 return dev->dev_private; 173 return dev->dev_private;
149} 174}
150 175
151static inline struct nouveau_device *
152nouveau_dev(struct drm_device *dev)
153{
154 return nv_device(nouveau_drm(dev)->device);
155}
156
157int nouveau_pmops_suspend(struct device *); 176int nouveau_pmops_suspend(struct device *);
158int nouveau_pmops_resume(struct device *); 177int nouveau_pmops_resume(struct device *);
159 178
160#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) 179#define nouveau_platform_device_create(p, u) \
161#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) 180 nouveau_platform_device_create_(p, sizeof(**u), (void **)u)
162#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args) 181struct drm_device *
163#define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args) 182nouveau_platform_device_create_(struct platform_device *pdev,
164#define NV_DEBUG(cli, fmt, args...) nv_debug((cli), fmt, ##args) 183 int size, void **pobject);
184void nouveau_drm_device_remove(struct drm_device *dev);
185
186#define NV_PRINTK(l,c,f,a...) do { \
187 struct nouveau_cli *_cli = (c); \
188 nv_##l(_cli->base.base.priv, f, ##a); \
189} while(0)
190#define NV_FATAL(drm,f,a...) NV_PRINTK(fatal, &(drm)->client, f, ##a)
191#define NV_ERROR(drm,f,a...) NV_PRINTK(error, &(drm)->client, f, ##a)
192#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
193#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
194#define NV_DEBUG(drm,f,a...) NV_PRINTK(debug, &(drm)->client, f, ##a)
165 195
166extern int nouveau_modeset; 196extern int nouveau_modeset;
167 197
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 758c11cb9a9a..ebfe3180109e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -51,11 +51,6 @@
51 51
52#include "nouveau_crtc.h" 52#include "nouveau_crtc.h"
53 53
54#include <core/client.h>
55#include <core/device.h>
56
57#include <subdev/fb.h>
58
59MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); 54MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
60static int nouveau_nofbaccel = 0; 55static int nouveau_nofbaccel = 0;
61module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); 56module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
@@ -65,7 +60,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
65{ 60{
66 struct nouveau_fbdev *fbcon = info->par; 61 struct nouveau_fbdev *fbcon = info->par;
67 struct nouveau_drm *drm = nouveau_drm(fbcon->dev); 62 struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
68 struct nouveau_device *device = nv_device(drm->device); 63 struct nvif_device *device = &drm->device;
69 int ret; 64 int ret;
70 65
71 if (info->state != FBINFO_STATE_RUNNING) 66 if (info->state != FBINFO_STATE_RUNNING)
@@ -74,10 +69,10 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
74 ret = -ENODEV; 69 ret = -ENODEV;
75 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 70 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
76 mutex_trylock(&drm->client.mutex)) { 71 mutex_trylock(&drm->client.mutex)) {
77 if (device->card_type < NV_50) 72 if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
78 ret = nv04_fbcon_fillrect(info, rect); 73 ret = nv04_fbcon_fillrect(info, rect);
79 else 74 else
80 if (device->card_type < NV_C0) 75 if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
81 ret = nv50_fbcon_fillrect(info, rect); 76 ret = nv50_fbcon_fillrect(info, rect);
82 else 77 else
83 ret = nvc0_fbcon_fillrect(info, rect); 78 ret = nvc0_fbcon_fillrect(info, rect);
@@ -97,7 +92,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
97{ 92{
98 struct nouveau_fbdev *fbcon = info->par; 93 struct nouveau_fbdev *fbcon = info->par;
99 struct nouveau_drm *drm = nouveau_drm(fbcon->dev); 94 struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
100 struct nouveau_device *device = nv_device(drm->device); 95 struct nvif_device *device = &drm->device;
101 int ret; 96 int ret;
102 97
103 if (info->state != FBINFO_STATE_RUNNING) 98 if (info->state != FBINFO_STATE_RUNNING)
@@ -106,10 +101,10 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
106 ret = -ENODEV; 101 ret = -ENODEV;
107 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 102 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
108 mutex_trylock(&drm->client.mutex)) { 103 mutex_trylock(&drm->client.mutex)) {
109 if (device->card_type < NV_50) 104 if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
110 ret = nv04_fbcon_copyarea(info, image); 105 ret = nv04_fbcon_copyarea(info, image);
111 else 106 else
112 if (device->card_type < NV_C0) 107 if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
113 ret = nv50_fbcon_copyarea(info, image); 108 ret = nv50_fbcon_copyarea(info, image);
114 else 109 else
115 ret = nvc0_fbcon_copyarea(info, image); 110 ret = nvc0_fbcon_copyarea(info, image);
@@ -129,7 +124,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
129{ 124{
130 struct nouveau_fbdev *fbcon = info->par; 125 struct nouveau_fbdev *fbcon = info->par;
131 struct nouveau_drm *drm = nouveau_drm(fbcon->dev); 126 struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
132 struct nouveau_device *device = nv_device(drm->device); 127 struct nvif_device *device = &drm->device;
133 int ret; 128 int ret;
134 129
135 if (info->state != FBINFO_STATE_RUNNING) 130 if (info->state != FBINFO_STATE_RUNNING)
@@ -138,10 +133,10 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
138 ret = -ENODEV; 133 ret = -ENODEV;
139 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 134 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
140 mutex_trylock(&drm->client.mutex)) { 135 mutex_trylock(&drm->client.mutex)) {
141 if (device->card_type < NV_50) 136 if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
142 ret = nv04_fbcon_imageblit(info, image); 137 ret = nv04_fbcon_imageblit(info, image);
143 else 138 else
144 if (device->card_type < NV_C0) 139 if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
145 ret = nv50_fbcon_imageblit(info, image); 140 ret = nv50_fbcon_imageblit(info, image);
146 else 141 else
147 ret = nvc0_fbcon_imageblit(info, image); 142 ret = nvc0_fbcon_imageblit(info, image);
@@ -212,6 +207,65 @@ static struct fb_ops nouveau_fbcon_sw_ops = {
212 .fb_debug_leave = drm_fb_helper_debug_leave, 207 .fb_debug_leave = drm_fb_helper_debug_leave,
213}; 208};
214 209
210void
211nouveau_fbcon_accel_save_disable(struct drm_device *dev)
212{
213 struct nouveau_drm *drm = nouveau_drm(dev);
214 if (drm->fbcon) {
215 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
216 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
217 }
218}
219
220void
221nouveau_fbcon_accel_restore(struct drm_device *dev)
222{
223 struct nouveau_drm *drm = nouveau_drm(dev);
224 if (drm->fbcon) {
225 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
226 }
227}
228
229void
230nouveau_fbcon_accel_fini(struct drm_device *dev)
231{
232 struct nouveau_drm *drm = nouveau_drm(dev);
233 struct nouveau_fbdev *fbcon = drm->fbcon;
234 if (fbcon && drm->channel) {
235 console_lock();
236 fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
237 console_unlock();
238 nouveau_channel_idle(drm->channel);
239 nvif_object_fini(&fbcon->twod);
240 nvif_object_fini(&fbcon->blit);
241 nvif_object_fini(&fbcon->gdi);
242 nvif_object_fini(&fbcon->patt);
243 nvif_object_fini(&fbcon->rop);
244 nvif_object_fini(&fbcon->clip);
245 nvif_object_fini(&fbcon->surf2d);
246 }
247}
248
249void
250nouveau_fbcon_accel_init(struct drm_device *dev)
251{
252 struct nouveau_drm *drm = nouveau_drm(dev);
253 struct nouveau_fbdev *fbcon = drm->fbcon;
254 struct fb_info *info = fbcon->helper.fbdev;
255 int ret;
256
257 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
258 ret = nv04_fbcon_accel_init(info);
259 else
260 if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
261 ret = nv50_fbcon_accel_init(info);
262 else
263 ret = nvc0_fbcon_accel_init(info);
264
265 if (ret == 0)
266 info->fbops = &nouveau_fbcon_ops;
267}
268
215static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 269static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
216 u16 blue, int regno) 270 u16 blue, int regno)
217{ 271{
@@ -257,7 +311,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
257 struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper; 311 struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
258 struct drm_device *dev = fbcon->dev; 312 struct drm_device *dev = fbcon->dev;
259 struct nouveau_drm *drm = nouveau_drm(dev); 313 struct nouveau_drm *drm = nouveau_drm(dev);
260 struct nouveau_device *device = nv_device(drm->device); 314 struct nvif_device *device = &drm->device;
261 struct fb_info *info; 315 struct fb_info *info;
262 struct drm_framebuffer *fb; 316 struct drm_framebuffer *fb;
263 struct nouveau_framebuffer *nouveau_fb; 317 struct nouveau_framebuffer *nouveau_fb;
@@ -299,8 +353,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
299 } 353 }
300 354
301 chan = nouveau_nofbaccel ? NULL : drm->channel; 355 chan = nouveau_nofbaccel ? NULL : drm->channel;
302 if (chan && device->card_type >= NV_50) { 356 if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
303 ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm, 357 ret = nouveau_bo_vma_add(nvbo, drm->client.vm,
304 &fbcon->nouveau_fb.vma); 358 &fbcon->nouveau_fb.vma);
305 if (ret) { 359 if (ret) {
306 NV_ERROR(drm, "failed to map fb into chan: %d\n", ret); 360 NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
@@ -357,20 +411,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
357 411
358 mutex_unlock(&dev->struct_mutex); 412 mutex_unlock(&dev->struct_mutex);
359 413
360 if (chan) { 414 if (chan)
361 ret = -ENODEV; 415 nouveau_fbcon_accel_init(dev);
362 if (device->card_type < NV_50)
363 ret = nv04_fbcon_accel_init(info);
364 else
365 if (device->card_type < NV_C0)
366 ret = nv50_fbcon_accel_init(info);
367 else
368 ret = nvc0_fbcon_accel_init(info);
369
370 if (ret == 0)
371 info->fbops = &nouveau_fbcon_ops;
372 }
373
374 nouveau_fbcon_zfill(dev, fbcon); 416 nouveau_fbcon_zfill(dev, fbcon);
375 417
376 /* To allow resizeing without swapping buffers */ 418 /* To allow resizeing without swapping buffers */
@@ -449,7 +491,6 @@ int
449nouveau_fbcon_init(struct drm_device *dev) 491nouveau_fbcon_init(struct drm_device *dev)
450{ 492{
451 struct nouveau_drm *drm = nouveau_drm(dev); 493 struct nouveau_drm *drm = nouveau_drm(dev);
452 struct nouveau_fb *pfb = nouveau_fb(drm->device);
453 struct nouveau_fbdev *fbcon; 494 struct nouveau_fbdev *fbcon;
454 int preferred_bpp; 495 int preferred_bpp;
455 int ret; 496 int ret;
@@ -476,10 +517,10 @@ nouveau_fbcon_init(struct drm_device *dev)
476 517
477 drm_fb_helper_single_add_all_connectors(&fbcon->helper); 518 drm_fb_helper_single_add_all_connectors(&fbcon->helper);
478 519
479 if (pfb->ram->size <= 32 * 1024 * 1024) 520 if (drm->device.info.ram_size <= 32 * 1024 * 1024)
480 preferred_bpp = 8; 521 preferred_bpp = 8;
481 else 522 else
482 if (pfb->ram->size <= 64 * 1024 * 1024) 523 if (drm->device.info.ram_size <= 64 * 1024 * 1024)
483 preferred_bpp = 16; 524 preferred_bpp = 16;
484 else 525 else
485 preferred_bpp = 32; 526 preferred_bpp = 32;
@@ -499,43 +540,25 @@ nouveau_fbcon_fini(struct drm_device *dev)
499 if (!drm->fbcon) 540 if (!drm->fbcon)
500 return; 541 return;
501 542
543 nouveau_fbcon_accel_fini(dev);
502 nouveau_fbcon_destroy(dev, drm->fbcon); 544 nouveau_fbcon_destroy(dev, drm->fbcon);
503 kfree(drm->fbcon); 545 kfree(drm->fbcon);
504 drm->fbcon = NULL; 546 drm->fbcon = NULL;
505} 547}
506 548
507void 549void
508nouveau_fbcon_save_disable_accel(struct drm_device *dev)
509{
510 struct nouveau_drm *drm = nouveau_drm(dev);
511 if (drm->fbcon) {
512 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
513 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
514 }
515}
516
517void
518nouveau_fbcon_restore_accel(struct drm_device *dev)
519{
520 struct nouveau_drm *drm = nouveau_drm(dev);
521 if (drm->fbcon) {
522 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
523 }
524}
525
526void
527nouveau_fbcon_set_suspend(struct drm_device *dev, int state) 550nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
528{ 551{
529 struct nouveau_drm *drm = nouveau_drm(dev); 552 struct nouveau_drm *drm = nouveau_drm(dev);
530 if (drm->fbcon) { 553 if (drm->fbcon) {
531 console_lock(); 554 console_lock();
532 if (state == 1)
533 nouveau_fbcon_save_disable_accel(dev);
534 fb_set_suspend(drm->fbcon->helper.fbdev, state);
535 if (state == 0) { 555 if (state == 0) {
536 nouveau_fbcon_restore_accel(dev); 556 nouveau_fbcon_accel_restore(dev);
537 nouveau_fbcon_zfill(dev, drm->fbcon); 557 nouveau_fbcon_zfill(dev, drm->fbcon);
538 } 558 }
559 fb_set_suspend(drm->fbcon->helper.fbdev, state);
560 if (state == 1)
561 nouveau_fbcon_accel_save_disable(dev);
539 console_unlock(); 562 console_unlock();
540 } 563 }
541} 564}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index fcff797d2084..34658cfa8f5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -37,6 +37,13 @@ struct nouveau_fbdev {
37 struct list_head fbdev_list; 37 struct list_head fbdev_list;
38 struct drm_device *dev; 38 struct drm_device *dev;
39 unsigned int saved_flags; 39 unsigned int saved_flags;
40 struct nvif_object surf2d;
41 struct nvif_object clip;
42 struct nvif_object rop;
43 struct nvif_object patt;
44 struct nvif_object gdi;
45 struct nvif_object blit;
46 struct nvif_object twod;
40}; 47};
41 48
42void nouveau_fbcon_restore(void); 49void nouveau_fbcon_restore(void);
@@ -61,8 +68,8 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
61int nouveau_fbcon_init(struct drm_device *dev); 68int nouveau_fbcon_init(struct drm_device *dev);
62void nouveau_fbcon_fini(struct drm_device *dev); 69void nouveau_fbcon_fini(struct drm_device *dev);
63void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); 70void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
64void nouveau_fbcon_save_disable_accel(struct drm_device *dev); 71void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
65void nouveau_fbcon_restore_accel(struct drm_device *dev); 72void nouveau_fbcon_accel_restore(struct drm_device *dev);
66 73
67void nouveau_fbcon_output_poll_changed(struct drm_device *dev); 74void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
68#endif /* __NV50_FBCON_H__ */ 75#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab5ea3b0d666..0a93114158cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -29,12 +29,13 @@
29#include <linux/ktime.h> 29#include <linux/ktime.h>
30#include <linux/hrtimer.h> 30#include <linux/hrtimer.h>
31 31
32#include <nvif/notify.h>
33#include <nvif/event.h>
34
32#include "nouveau_drm.h" 35#include "nouveau_drm.h"
33#include "nouveau_dma.h" 36#include "nouveau_dma.h"
34#include "nouveau_fence.h" 37#include "nouveau_fence.h"
35 38
36#include <engine/fifo.h>
37
38struct fence_work { 39struct fence_work {
39 struct work_struct base; 40 struct work_struct base;
40 struct list_head head; 41 struct list_head head;
@@ -165,12 +166,18 @@ nouveau_fence_done(struct nouveau_fence *fence)
165 return !fence->channel; 166 return !fence->channel;
166} 167}
167 168
169struct nouveau_fence_wait {
170 struct nouveau_fence_priv *priv;
171 struct nvif_notify notify;
172};
173
168static int 174static int
169nouveau_fence_wait_uevent_handler(void *data, u32 type, int index) 175nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
170{ 176{
171 struct nouveau_fence_priv *priv = data; 177 struct nouveau_fence_wait *wait =
172 wake_up_all(&priv->waiting); 178 container_of(notify, typeof(*wait), notify);
173 return NVKM_EVENT_KEEP; 179 wake_up_all(&wait->priv->waiting);
180 return NVIF_NOTIFY_KEEP;
174} 181}
175 182
176static int 183static int
@@ -178,18 +185,22 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
178 185
179{ 186{
180 struct nouveau_channel *chan = fence->channel; 187 struct nouveau_channel *chan = fence->channel;
181 struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
182 struct nouveau_fence_priv *priv = chan->drm->fence; 188 struct nouveau_fence_priv *priv = chan->drm->fence;
183 struct nouveau_eventh *handler; 189 struct nouveau_fence_wait wait = { .priv = priv };
184 int ret = 0; 190 int ret = 0;
185 191
186 ret = nouveau_event_new(pfifo->uevent, 1, 0, 192 ret = nvif_notify_init(chan->object, NULL,
187 nouveau_fence_wait_uevent_handler, 193 nouveau_fence_wait_uevent_handler, false,
188 priv, &handler); 194 G82_CHANNEL_DMA_V0_NTFY_UEVENT,
195 &(struct nvif_notify_uevent_req) {
196 },
197 sizeof(struct nvif_notify_uevent_req),
198 sizeof(struct nvif_notify_uevent_rep),
199 &wait.notify);
189 if (ret) 200 if (ret)
190 return ret; 201 return ret;
191 202
192 nouveau_event_get(handler); 203 nvif_notify_get(&wait.notify);
193 204
194 if (fence->timeout) { 205 if (fence->timeout) {
195 unsigned long timeout = fence->timeout - jiffies; 206 unsigned long timeout = fence->timeout - jiffies;
@@ -221,7 +232,7 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
221 } 232 }
222 } 233 }
223 234
224 nouveau_event_ref(NULL, &handler); 235 nvif_notify_fini(&wait.notify);
225 if (unlikely(ret < 0)) 236 if (unlikely(ret < 0))
226 return ret; 237 return ret;
227 238
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index df9d451afdcd..292a677bfed4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,8 +24,6 @@
24 * 24 *
25 */ 25 */
26 26
27#include <subdev/fb.h>
28
29#include "nouveau_drm.h" 27#include "nouveau_drm.h"
30#include "nouveau_dma.h" 28#include "nouveau_dma.h"
31#include "nouveau_fence.h" 29#include "nouveau_fence.h"
@@ -58,14 +56,14 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
58 struct nouveau_vma *vma; 56 struct nouveau_vma *vma;
59 int ret; 57 int ret;
60 58
61 if (!cli->base.vm) 59 if (!cli->vm)
62 return 0; 60 return 0;
63 61
64 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); 62 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
65 if (ret) 63 if (ret)
66 return ret; 64 return ret;
67 65
68 vma = nouveau_bo_vma_find(nvbo, cli->base.vm); 66 vma = nouveau_bo_vma_find(nvbo, cli->vm);
69 if (!vma) { 67 if (!vma) {
70 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 68 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
71 if (!vma) { 69 if (!vma) {
@@ -73,7 +71,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
73 goto out; 71 goto out;
74 } 72 }
75 73
76 ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma); 74 ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
77 if (ret) { 75 if (ret) {
78 kfree(vma); 76 kfree(vma);
79 goto out; 77 goto out;
@@ -129,14 +127,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
129 struct nouveau_vma *vma; 127 struct nouveau_vma *vma;
130 int ret; 128 int ret;
131 129
132 if (!cli->base.vm) 130 if (!cli->vm)
133 return; 131 return;
134 132
135 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); 133 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
136 if (ret) 134 if (ret)
137 return; 135 return;
138 136
139 vma = nouveau_bo_vma_find(nvbo, cli->base.vm); 137 vma = nouveau_bo_vma_find(nvbo, cli->vm);
140 if (vma) { 138 if (vma) {
141 if (--vma->refcount == 0) 139 if (--vma->refcount == 0)
142 nouveau_gem_object_unmap(nvbo, vma); 140 nouveau_gem_object_unmap(nvbo, vma);
@@ -173,7 +171,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
173 */ 171 */
174 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | 172 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
175 NOUVEAU_GEM_DOMAIN_GART; 173 NOUVEAU_GEM_DOMAIN_GART;
176 if (nv_device(drm->device)->card_type >= NV_50) 174 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
177 nvbo->valid_domains &= domain; 175 nvbo->valid_domains &= domain;
178 176
179 /* Initialize the embedded gem-object. We return a single gem-reference 177 /* Initialize the embedded gem-object. We return a single gem-reference
@@ -202,8 +200,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
202 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; 200 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
203 201
204 rep->offset = nvbo->bo.offset; 202 rep->offset = nvbo->bo.offset;
205 if (cli->base.vm) { 203 if (cli->vm) {
206 vma = nouveau_bo_vma_find(nvbo, cli->base.vm); 204 vma = nouveau_bo_vma_find(nvbo, cli->vm);
207 if (!vma) 205 if (!vma)
208 return -EINVAL; 206 return -EINVAL;
209 207
@@ -223,13 +221,13 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
223{ 221{
224 struct nouveau_drm *drm = nouveau_drm(dev); 222 struct nouveau_drm *drm = nouveau_drm(dev);
225 struct nouveau_cli *cli = nouveau_cli(file_priv); 223 struct nouveau_cli *cli = nouveau_cli(file_priv);
226 struct nouveau_fb *pfb = nouveau_fb(drm->device); 224 struct nouveau_fb *pfb = nvkm_fb(&drm->device);
227 struct drm_nouveau_gem_new *req = data; 225 struct drm_nouveau_gem_new *req = data;
228 struct nouveau_bo *nvbo = NULL; 226 struct nouveau_bo *nvbo = NULL;
229 int ret = 0; 227 int ret = 0;
230 228
231 if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { 229 if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
232 NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags); 230 NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
233 return -EINVAL; 231 return -EINVAL;
234 } 232 }
235 233
@@ -350,7 +348,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
350 ww_acquire_init(&op->ticket, &reservation_ww_class); 348 ww_acquire_init(&op->ticket, &reservation_ww_class);
351retry: 349retry:
352 if (++trycnt > 100000) { 350 if (++trycnt > 100000) {
353 NV_ERROR(cli, "%s failed and gave up.\n", __func__); 351 NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
354 return -EINVAL; 352 return -EINVAL;
355 } 353 }
356 354
@@ -361,7 +359,7 @@ retry:
361 359
362 gem = drm_gem_object_lookup(dev, file_priv, b->handle); 360 gem = drm_gem_object_lookup(dev, file_priv, b->handle);
363 if (!gem) { 361 if (!gem) {
364 NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle); 362 NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
365 ww_acquire_done(&op->ticket); 363 ww_acquire_done(&op->ticket);
366 validate_fini(op, NULL); 364 validate_fini(op, NULL);
367 return -ENOENT; 365 return -ENOENT;
@@ -374,7 +372,7 @@ retry:
374 } 372 }
375 373
376 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { 374 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
377 NV_ERROR(cli, "multiple instances of buffer %d on " 375 NV_PRINTK(error, cli, "multiple instances of buffer %d on "
378 "validation list\n", b->handle); 376 "validation list\n", b->handle);
379 drm_gem_object_unreference_unlocked(gem); 377 drm_gem_object_unreference_unlocked(gem);
380 ww_acquire_done(&op->ticket); 378 ww_acquire_done(&op->ticket);
@@ -396,7 +394,7 @@ retry:
396 ww_acquire_fini(&op->ticket); 394 ww_acquire_fini(&op->ticket);
397 drm_gem_object_unreference_unlocked(gem); 395 drm_gem_object_unreference_unlocked(gem);
398 if (ret != -ERESTARTSYS) 396 if (ret != -ERESTARTSYS)
399 NV_ERROR(cli, "fail reserve\n"); 397 NV_PRINTK(error, cli, "fail reserve\n");
400 return ret; 398 return ret;
401 } 399 }
402 } 400 }
@@ -414,7 +412,7 @@ retry:
414 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) 412 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
415 list_add_tail(&nvbo->entry, &op->gart_list); 413 list_add_tail(&nvbo->entry, &op->gart_list);
416 else { 414 else {
417 NV_ERROR(cli, "invalid valid domains: 0x%08x\n", 415 NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
418 b->valid_domains); 416 b->valid_domains);
419 list_add_tail(&nvbo->entry, &op->both_list); 417 list_add_tail(&nvbo->entry, &op->both_list);
420 ww_acquire_done(&op->ticket); 418 ww_acquire_done(&op->ticket);
@@ -465,24 +463,24 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
465 b->write_domains, 463 b->write_domains,
466 b->valid_domains); 464 b->valid_domains);
467 if (unlikely(ret)) { 465 if (unlikely(ret)) {
468 NV_ERROR(cli, "fail set_domain\n"); 466 NV_PRINTK(error, cli, "fail set_domain\n");
469 return ret; 467 return ret;
470 } 468 }
471 469
472 ret = nouveau_bo_validate(nvbo, true, false); 470 ret = nouveau_bo_validate(nvbo, true, false);
473 if (unlikely(ret)) { 471 if (unlikely(ret)) {
474 if (ret != -ERESTARTSYS) 472 if (ret != -ERESTARTSYS)
475 NV_ERROR(cli, "fail ttm_validate\n"); 473 NV_PRINTK(error, cli, "fail ttm_validate\n");
476 return ret; 474 return ret;
477 } 475 }
478 476
479 ret = validate_sync(chan, nvbo); 477 ret = validate_sync(chan, nvbo);
480 if (unlikely(ret)) { 478 if (unlikely(ret)) {
481 NV_ERROR(cli, "fail post-validate sync\n"); 479 NV_PRINTK(error, cli, "fail post-validate sync\n");
482 return ret; 480 return ret;
483 } 481 }
484 482
485 if (nv_device(drm->device)->card_type < NV_50) { 483 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
486 if (nvbo->bo.offset == b->presumed.offset && 484 if (nvbo->bo.offset == b->presumed.offset &&
487 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 485 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
488 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || 486 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -527,14 +525,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
527 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); 525 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
528 if (unlikely(ret)) { 526 if (unlikely(ret)) {
529 if (ret != -ERESTARTSYS) 527 if (ret != -ERESTARTSYS)
530 NV_ERROR(cli, "validate_init\n"); 528 NV_PRINTK(error, cli, "validate_init\n");
531 return ret; 529 return ret;
532 } 530 }
533 531
534 ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers); 532 ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
535 if (unlikely(ret < 0)) { 533 if (unlikely(ret < 0)) {
536 if (ret != -ERESTARTSYS) 534 if (ret != -ERESTARTSYS)
537 NV_ERROR(cli, "validate vram_list\n"); 535 NV_PRINTK(error, cli, "validate vram_list\n");
538 validate_fini(op, NULL); 536 validate_fini(op, NULL);
539 return ret; 537 return ret;
540 } 538 }
@@ -543,7 +541,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
543 ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers); 541 ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
544 if (unlikely(ret < 0)) { 542 if (unlikely(ret < 0)) {
545 if (ret != -ERESTARTSYS) 543 if (ret != -ERESTARTSYS)
546 NV_ERROR(cli, "validate gart_list\n"); 544 NV_PRINTK(error, cli, "validate gart_list\n");
547 validate_fini(op, NULL); 545 validate_fini(op, NULL);
548 return ret; 546 return ret;
549 } 547 }
@@ -552,7 +550,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
552 ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers); 550 ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
553 if (unlikely(ret < 0)) { 551 if (unlikely(ret < 0)) {
554 if (ret != -ERESTARTSYS) 552 if (ret != -ERESTARTSYS)
555 NV_ERROR(cli, "validate both_list\n"); 553 NV_PRINTK(error, cli, "validate both_list\n");
556 validate_fini(op, NULL); 554 validate_fini(op, NULL);
557 return ret; 555 return ret;
558 } 556 }
@@ -613,7 +611,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
613 uint32_t data; 611 uint32_t data;
614 612
615 if (unlikely(r->bo_index > req->nr_buffers)) { 613 if (unlikely(r->bo_index > req->nr_buffers)) {
616 NV_ERROR(cli, "reloc bo index invalid\n"); 614 NV_PRINTK(error, cli, "reloc bo index invalid\n");
617 ret = -EINVAL; 615 ret = -EINVAL;
618 break; 616 break;
619 } 617 }
@@ -623,7 +621,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
623 continue; 621 continue;
624 622
625 if (unlikely(r->reloc_bo_index > req->nr_buffers)) { 623 if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
626 NV_ERROR(cli, "reloc container bo index invalid\n"); 624 NV_PRINTK(error, cli, "reloc container bo index invalid\n");
627 ret = -EINVAL; 625 ret = -EINVAL;
628 break; 626 break;
629 } 627 }
@@ -631,7 +629,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
631 629
632 if (unlikely(r->reloc_bo_offset + 4 > 630 if (unlikely(r->reloc_bo_offset + 4 >
633 nvbo->bo.mem.num_pages << PAGE_SHIFT)) { 631 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
634 NV_ERROR(cli, "reloc outside of bo\n"); 632 NV_PRINTK(error, cli, "reloc outside of bo\n");
635 ret = -EINVAL; 633 ret = -EINVAL;
636 break; 634 break;
637 } 635 }
@@ -640,7 +638,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
640 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, 638 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
641 &nvbo->kmap); 639 &nvbo->kmap);
642 if (ret) { 640 if (ret) {
643 NV_ERROR(cli, "failed kmap for reloc\n"); 641 NV_PRINTK(error, cli, "failed kmap for reloc\n");
644 break; 642 break;
645 } 643 }
646 nvbo->validate_mapped = true; 644 nvbo->validate_mapped = true;
@@ -665,7 +663,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
665 ret = ttm_bo_wait(&nvbo->bo, false, false, false); 663 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
666 spin_unlock(&nvbo->bo.bdev->fence_lock); 664 spin_unlock(&nvbo->bo.bdev->fence_lock);
667 if (ret) { 665 if (ret) {
668 NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret); 666 NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
669 break; 667 break;
670 } 668 }
671 669
@@ -696,7 +694,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
696 return -ENOMEM; 694 return -ENOMEM;
697 695
698 list_for_each_entry(temp, &abi16->channels, head) { 696 list_for_each_entry(temp, &abi16->channels, head) {
699 if (temp->chan->handle == (NVDRM_CHAN | req->channel)) { 697 if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
700 chan = temp->chan; 698 chan = temp->chan;
701 break; 699 break;
702 } 700 }
@@ -711,19 +709,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
711 goto out_next; 709 goto out_next;
712 710
713 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { 711 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
714 NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n", 712 NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
715 req->nr_push, NOUVEAU_GEM_MAX_PUSH); 713 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
716 return nouveau_abi16_put(abi16, -EINVAL); 714 return nouveau_abi16_put(abi16, -EINVAL);
717 } 715 }
718 716
719 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { 717 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
720 NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n", 718 NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
721 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); 719 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
722 return nouveau_abi16_put(abi16, -EINVAL); 720 return nouveau_abi16_put(abi16, -EINVAL);
723 } 721 }
724 722
725 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { 723 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
726 NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n", 724 NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
727 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); 725 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
728 return nouveau_abi16_put(abi16, -EINVAL); 726 return nouveau_abi16_put(abi16, -EINVAL);
729 } 727 }
@@ -741,7 +739,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
741 /* Ensure all push buffers are on validate list */ 739 /* Ensure all push buffers are on validate list */
742 for (i = 0; i < req->nr_push; i++) { 740 for (i = 0; i < req->nr_push; i++) {
743 if (push[i].bo_index >= req->nr_buffers) { 741 if (push[i].bo_index >= req->nr_buffers) {
744 NV_ERROR(cli, "push %d buffer not in list\n", i); 742 NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
745 ret = -EINVAL; 743 ret = -EINVAL;
746 goto out_prevalid; 744 goto out_prevalid;
747 } 745 }
@@ -752,7 +750,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
752 req->nr_buffers, &op, &do_reloc); 750 req->nr_buffers, &op, &do_reloc);
753 if (ret) { 751 if (ret) {
754 if (ret != -ERESTARTSYS) 752 if (ret != -ERESTARTSYS)
755 NV_ERROR(cli, "validate: %d\n", ret); 753 NV_PRINTK(error, cli, "validate: %d\n", ret);
756 goto out_prevalid; 754 goto out_prevalid;
757 } 755 }
758 756
@@ -760,7 +758,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
760 if (do_reloc) { 758 if (do_reloc) {
761 ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo); 759 ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
762 if (ret) { 760 if (ret) {
763 NV_ERROR(cli, "reloc apply: %d\n", ret); 761 NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
764 goto out; 762 goto out;
765 } 763 }
766 } 764 }
@@ -768,7 +766,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
768 if (chan->dma.ib_max) { 766 if (chan->dma.ib_max) {
769 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); 767 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
770 if (ret) { 768 if (ret) {
771 NV_ERROR(cli, "nv50cal_space: %d\n", ret); 769 NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
772 goto out; 770 goto out;
773 } 771 }
774 772
@@ -780,10 +778,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
780 push[i].length); 778 push[i].length);
781 } 779 }
782 } else 780 } else
783 if (nv_device(drm->device)->chipset >= 0x25) { 781 if (drm->device.info.chipset >= 0x25) {
784 ret = RING_SPACE(chan, req->nr_push * 2); 782 ret = RING_SPACE(chan, req->nr_push * 2);
785 if (ret) { 783 if (ret) {
786 NV_ERROR(cli, "cal_space: %d\n", ret); 784 NV_PRINTK(error, cli, "cal_space: %d\n", ret);
787 goto out; 785 goto out;
788 } 786 }
789 787
@@ -797,7 +795,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
797 } else { 795 } else {
798 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); 796 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
799 if (ret) { 797 if (ret) {
800 NV_ERROR(cli, "jmp_space: %d\n", ret); 798 NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
801 goto out; 799 goto out;
802 } 800 }
803 801
@@ -835,7 +833,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
835 833
836 ret = nouveau_fence_new(chan, false, &fence); 834 ret = nouveau_fence_new(chan, false, &fence);
837 if (ret) { 835 if (ret) {
838 NV_ERROR(cli, "error fencing pushbuf: %d\n", ret); 836 NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
839 WIND_RING(chan); 837 WIND_RING(chan);
840 goto out; 838 goto out;
841 } 839 }
@@ -853,7 +851,7 @@ out_next:
853 req->suffix0 = 0x00000000; 851 req->suffix0 = 0x00000000;
854 req->suffix1 = 0x00000000; 852 req->suffix1 = 0x00000000;
855 } else 853 } else
856 if (nv_device(drm->device)->chipset >= 0x25) { 854 if (drm->device.info.chipset >= 0x25) {
857 req->suffix0 = 0x00020000; 855 req->suffix0 = 0x00020000;
858 req->suffix1 = 0x00000000; 856 req->suffix1 = 0x00000000;
859 } else { 857 } else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 19fd767bab10..afb36d66e78d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -34,17 +34,13 @@
34#include "nouveau_drm.h" 34#include "nouveau_drm.h"
35#include "nouveau_hwmon.h" 35#include "nouveau_hwmon.h"
36 36
37#include <subdev/gpio.h>
38#include <subdev/timer.h>
39#include <subdev/therm.h>
40
41#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 37#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
42static ssize_t 38static ssize_t
43nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) 39nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
44{ 40{
45 struct drm_device *dev = dev_get_drvdata(d); 41 struct drm_device *dev = dev_get_drvdata(d);
46 struct nouveau_drm *drm = nouveau_drm(dev); 42 struct nouveau_drm *drm = nouveau_drm(dev);
47 struct nouveau_therm *therm = nouveau_therm(drm->device); 43 struct nouveau_therm *therm = nvkm_therm(&drm->device);
48 int temp = therm->temp_get(therm); 44 int temp = therm->temp_get(therm);
49 45
50 if (temp < 0) 46 if (temp < 0)
@@ -70,7 +66,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
70{ 66{
71 struct drm_device *dev = dev_get_drvdata(d); 67 struct drm_device *dev = dev_get_drvdata(d);
72 struct nouveau_drm *drm = nouveau_drm(dev); 68 struct nouveau_drm *drm = nouveau_drm(dev);
73 struct nouveau_therm *therm = nouveau_therm(drm->device); 69 struct nouveau_therm *therm = nvkm_therm(&drm->device);
74 70
75 return snprintf(buf, PAGE_SIZE, "%d\n", 71 return snprintf(buf, PAGE_SIZE, "%d\n",
76 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000); 72 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
@@ -82,7 +78,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
82{ 78{
83 struct drm_device *dev = dev_get_drvdata(d); 79 struct drm_device *dev = dev_get_drvdata(d);
84 struct nouveau_drm *drm = nouveau_drm(dev); 80 struct nouveau_drm *drm = nouveau_drm(dev);
85 struct nouveau_therm *therm = nouveau_therm(drm->device); 81 struct nouveau_therm *therm = nvkm_therm(&drm->device);
86 long value; 82 long value;
87 83
88 if (kstrtol(buf, 10, &value) == -EINVAL) 84 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -103,7 +99,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
103{ 99{
104 struct drm_device *dev = dev_get_drvdata(d); 100 struct drm_device *dev = dev_get_drvdata(d);
105 struct nouveau_drm *drm = nouveau_drm(dev); 101 struct nouveau_drm *drm = nouveau_drm(dev);
106 struct nouveau_therm *therm = nouveau_therm(drm->device); 102 struct nouveau_therm *therm = nvkm_therm(&drm->device);
107 103
108 return snprintf(buf, PAGE_SIZE, "%d\n", 104 return snprintf(buf, PAGE_SIZE, "%d\n",
109 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000); 105 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
@@ -115,7 +111,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
115{ 111{
116 struct drm_device *dev = dev_get_drvdata(d); 112 struct drm_device *dev = dev_get_drvdata(d);
117 struct nouveau_drm *drm = nouveau_drm(dev); 113 struct nouveau_drm *drm = nouveau_drm(dev);
118 struct nouveau_therm *therm = nouveau_therm(drm->device); 114 struct nouveau_therm *therm = nvkm_therm(&drm->device);
119 long value; 115 long value;
120 116
121 if (kstrtol(buf, 10, &value) == -EINVAL) 117 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -135,7 +131,7 @@ nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
135{ 131{
136 struct drm_device *dev = dev_get_drvdata(d); 132 struct drm_device *dev = dev_get_drvdata(d);
137 struct nouveau_drm *drm = nouveau_drm(dev); 133 struct nouveau_drm *drm = nouveau_drm(dev);
138 struct nouveau_therm *therm = nouveau_therm(drm->device); 134 struct nouveau_therm *therm = nvkm_therm(&drm->device);
139 135
140 return snprintf(buf, PAGE_SIZE, "%d\n", 136 return snprintf(buf, PAGE_SIZE, "%d\n",
141 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000); 137 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
@@ -146,7 +142,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
146{ 142{
147 struct drm_device *dev = dev_get_drvdata(d); 143 struct drm_device *dev = dev_get_drvdata(d);
148 struct nouveau_drm *drm = nouveau_drm(dev); 144 struct nouveau_drm *drm = nouveau_drm(dev);
149 struct nouveau_therm *therm = nouveau_therm(drm->device); 145 struct nouveau_therm *therm = nvkm_therm(&drm->device);
150 long value; 146 long value;
151 147
152 if (kstrtol(buf, 10, &value) == -EINVAL) 148 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -166,7 +162,7 @@ nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
166{ 162{
167 struct drm_device *dev = dev_get_drvdata(d); 163 struct drm_device *dev = dev_get_drvdata(d);
168 struct nouveau_drm *drm = nouveau_drm(dev); 164 struct nouveau_drm *drm = nouveau_drm(dev);
169 struct nouveau_therm *therm = nouveau_therm(drm->device); 165 struct nouveau_therm *therm = nvkm_therm(&drm->device);
170 166
171 return snprintf(buf, PAGE_SIZE, "%d\n", 167 return snprintf(buf, PAGE_SIZE, "%d\n",
172 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000); 168 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
@@ -177,7 +173,7 @@ nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
177{ 173{
178 struct drm_device *dev = dev_get_drvdata(d); 174 struct drm_device *dev = dev_get_drvdata(d);
179 struct nouveau_drm *drm = nouveau_drm(dev); 175 struct nouveau_drm *drm = nouveau_drm(dev);
180 struct nouveau_therm *therm = nouveau_therm(drm->device); 176 struct nouveau_therm *therm = nvkm_therm(&drm->device);
181 long value; 177 long value;
182 178
183 if (kstrtol(buf, 10, &value) == -EINVAL) 179 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -198,7 +194,7 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
198{ 194{
199 struct drm_device *dev = dev_get_drvdata(d); 195 struct drm_device *dev = dev_get_drvdata(d);
200 struct nouveau_drm *drm = nouveau_drm(dev); 196 struct nouveau_drm *drm = nouveau_drm(dev);
201 struct nouveau_therm *therm = nouveau_therm(drm->device); 197 struct nouveau_therm *therm = nvkm_therm(&drm->device);
202 198
203 return snprintf(buf, PAGE_SIZE, "%d\n", 199 return snprintf(buf, PAGE_SIZE, "%d\n",
204 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000); 200 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
@@ -210,7 +206,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
210{ 206{
211 struct drm_device *dev = dev_get_drvdata(d); 207 struct drm_device *dev = dev_get_drvdata(d);
212 struct nouveau_drm *drm = nouveau_drm(dev); 208 struct nouveau_drm *drm = nouveau_drm(dev);
213 struct nouveau_therm *therm = nouveau_therm(drm->device); 209 struct nouveau_therm *therm = nvkm_therm(&drm->device);
214 long value; 210 long value;
215 211
216 if (kstrtol(buf, 10, &value) == -EINVAL) 212 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -231,7 +227,7 @@ nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
231{ 227{
232 struct drm_device *dev = dev_get_drvdata(d); 228 struct drm_device *dev = dev_get_drvdata(d);
233 struct nouveau_drm *drm = nouveau_drm(dev); 229 struct nouveau_drm *drm = nouveau_drm(dev);
234 struct nouveau_therm *therm = nouveau_therm(drm->device); 230 struct nouveau_therm *therm = nvkm_therm(&drm->device);
235 231
236 return snprintf(buf, PAGE_SIZE, "%d\n", 232 return snprintf(buf, PAGE_SIZE, "%d\n",
237 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000); 233 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
@@ -244,7 +240,7 @@ nouveau_hwmon_set_critical_temp_hyst(struct device *d,
244{ 240{
245 struct drm_device *dev = dev_get_drvdata(d); 241 struct drm_device *dev = dev_get_drvdata(d);
246 struct nouveau_drm *drm = nouveau_drm(dev); 242 struct nouveau_drm *drm = nouveau_drm(dev);
247 struct nouveau_therm *therm = nouveau_therm(drm->device); 243 struct nouveau_therm *therm = nvkm_therm(&drm->device);
248 long value; 244 long value;
249 245
250 if (kstrtol(buf, 10, &value) == -EINVAL) 246 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -264,7 +260,7 @@ nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
264{ 260{
265 struct drm_device *dev = dev_get_drvdata(d); 261 struct drm_device *dev = dev_get_drvdata(d);
266 struct nouveau_drm *drm = nouveau_drm(dev); 262 struct nouveau_drm *drm = nouveau_drm(dev);
267 struct nouveau_therm *therm = nouveau_therm(drm->device); 263 struct nouveau_therm *therm = nvkm_therm(&drm->device);
268 264
269 return snprintf(buf, PAGE_SIZE, "%d\n", 265 return snprintf(buf, PAGE_SIZE, "%d\n",
270 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000); 266 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
@@ -276,7 +272,7 @@ nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
276{ 272{
277 struct drm_device *dev = dev_get_drvdata(d); 273 struct drm_device *dev = dev_get_drvdata(d);
278 struct nouveau_drm *drm = nouveau_drm(dev); 274 struct nouveau_drm *drm = nouveau_drm(dev);
279 struct nouveau_therm *therm = nouveau_therm(drm->device); 275 struct nouveau_therm *therm = nvkm_therm(&drm->device);
280 long value; 276 long value;
281 277
282 if (kstrtol(buf, 10, &value) == -EINVAL) 278 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -297,7 +293,7 @@ nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
297{ 293{
298 struct drm_device *dev = dev_get_drvdata(d); 294 struct drm_device *dev = dev_get_drvdata(d);
299 struct nouveau_drm *drm = nouveau_drm(dev); 295 struct nouveau_drm *drm = nouveau_drm(dev);
300 struct nouveau_therm *therm = nouveau_therm(drm->device); 296 struct nouveau_therm *therm = nvkm_therm(&drm->device);
301 297
302 return snprintf(buf, PAGE_SIZE, "%d\n", 298 return snprintf(buf, PAGE_SIZE, "%d\n",
303 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000); 299 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
@@ -310,7 +306,7 @@ nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
310{ 306{
311 struct drm_device *dev = dev_get_drvdata(d); 307 struct drm_device *dev = dev_get_drvdata(d);
312 struct nouveau_drm *drm = nouveau_drm(dev); 308 struct nouveau_drm *drm = nouveau_drm(dev);
313 struct nouveau_therm *therm = nouveau_therm(drm->device); 309 struct nouveau_therm *therm = nvkm_therm(&drm->device);
314 long value; 310 long value;
315 311
316 if (kstrtol(buf, 10, &value) == -EINVAL) 312 if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -350,7 +346,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
350{ 346{
351 struct drm_device *dev = dev_get_drvdata(d); 347 struct drm_device *dev = dev_get_drvdata(d);
352 struct nouveau_drm *drm = nouveau_drm(dev); 348 struct nouveau_drm *drm = nouveau_drm(dev);
353 struct nouveau_therm *therm = nouveau_therm(drm->device); 349 struct nouveau_therm *therm = nvkm_therm(&drm->device);
354 350
355 return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm)); 351 return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
356} 352}
@@ -363,7 +359,7 @@ nouveau_hwmon_get_pwm1_enable(struct device *d,
363{ 359{
364 struct drm_device *dev = dev_get_drvdata(d); 360 struct drm_device *dev = dev_get_drvdata(d);
365 struct nouveau_drm *drm = nouveau_drm(dev); 361 struct nouveau_drm *drm = nouveau_drm(dev);
366 struct nouveau_therm *therm = nouveau_therm(drm->device); 362 struct nouveau_therm *therm = nvkm_therm(&drm->device);
367 int ret; 363 int ret;
368 364
369 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE); 365 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
@@ -379,7 +375,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
379{ 375{
380 struct drm_device *dev = dev_get_drvdata(d); 376 struct drm_device *dev = dev_get_drvdata(d);
381 struct nouveau_drm *drm = nouveau_drm(dev); 377 struct nouveau_drm *drm = nouveau_drm(dev);
382 struct nouveau_therm *therm = nouveau_therm(drm->device); 378 struct nouveau_therm *therm = nvkm_therm(&drm->device);
383 long value; 379 long value;
384 int ret; 380 int ret;
385 381
@@ -402,7 +398,7 @@ nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
402{ 398{
403 struct drm_device *dev = dev_get_drvdata(d); 399 struct drm_device *dev = dev_get_drvdata(d);
404 struct nouveau_drm *drm = nouveau_drm(dev); 400 struct nouveau_drm *drm = nouveau_drm(dev);
405 struct nouveau_therm *therm = nouveau_therm(drm->device); 401 struct nouveau_therm *therm = nvkm_therm(&drm->device);
406 int ret; 402 int ret;
407 403
408 ret = therm->fan_get(therm); 404 ret = therm->fan_get(therm);
@@ -418,7 +414,7 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
418{ 414{
419 struct drm_device *dev = dev_get_drvdata(d); 415 struct drm_device *dev = dev_get_drvdata(d);
420 struct nouveau_drm *drm = nouveau_drm(dev); 416 struct nouveau_drm *drm = nouveau_drm(dev);
421 struct nouveau_therm *therm = nouveau_therm(drm->device); 417 struct nouveau_therm *therm = nvkm_therm(&drm->device);
422 int ret = -ENODEV; 418 int ret = -ENODEV;
423 long value; 419 long value;
424 420
@@ -442,7 +438,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
442{ 438{
443 struct drm_device *dev = dev_get_drvdata(d); 439 struct drm_device *dev = dev_get_drvdata(d);
444 struct nouveau_drm *drm = nouveau_drm(dev); 440 struct nouveau_drm *drm = nouveau_drm(dev);
445 struct nouveau_therm *therm = nouveau_therm(drm->device); 441 struct nouveau_therm *therm = nvkm_therm(&drm->device);
446 int ret; 442 int ret;
447 443
448 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY); 444 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
@@ -458,7 +454,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
458{ 454{
459 struct drm_device *dev = dev_get_drvdata(d); 455 struct drm_device *dev = dev_get_drvdata(d);
460 struct nouveau_drm *drm = nouveau_drm(dev); 456 struct nouveau_drm *drm = nouveau_drm(dev);
461 struct nouveau_therm *therm = nouveau_therm(drm->device); 457 struct nouveau_therm *therm = nvkm_therm(&drm->device);
462 long value; 458 long value;
463 int ret; 459 int ret;
464 460
@@ -482,7 +478,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
482{ 478{
483 struct drm_device *dev = dev_get_drvdata(d); 479 struct drm_device *dev = dev_get_drvdata(d);
484 struct nouveau_drm *drm = nouveau_drm(dev); 480 struct nouveau_drm *drm = nouveau_drm(dev);
485 struct nouveau_therm *therm = nouveau_therm(drm->device); 481 struct nouveau_therm *therm = nvkm_therm(&drm->device);
486 int ret; 482 int ret;
487 483
488 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY); 484 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
@@ -498,7 +494,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
498{ 494{
499 struct drm_device *dev = dev_get_drvdata(d); 495 struct drm_device *dev = dev_get_drvdata(d);
500 struct nouveau_drm *drm = nouveau_drm(dev); 496 struct nouveau_drm *drm = nouveau_drm(dev);
501 struct nouveau_therm *therm = nouveau_therm(drm->device); 497 struct nouveau_therm *therm = nvkm_therm(&drm->device);
502 long value; 498 long value;
503 int ret; 499 int ret;
504 500
@@ -565,7 +561,7 @@ nouveau_hwmon_init(struct drm_device *dev)
565{ 561{
566#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 562#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
567 struct nouveau_drm *drm = nouveau_drm(dev); 563 struct nouveau_drm *drm = nouveau_drm(dev);
568 struct nouveau_therm *therm = nouveau_therm(drm->device); 564 struct nouveau_therm *therm = nvkm_therm(&drm->device);
569 struct nouveau_hwmon *hwmon; 565 struct nouveau_hwmon *hwmon;
570 struct device *hwmon_dev; 566 struct device *hwmon_dev;
571 int ret = 0; 567 int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
new file mode 100644
index 000000000000..47ca88623753
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -0,0 +1,136 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25/*******************************************************************************
26 * NVIF client driver - NVKM directly linked
27 ******************************************************************************/
28
29#include <core/client.h>
30#include <core/notify.h>
31#include <core/ioctl.h>
32
33#include <nvif/client.h>
34#include <nvif/driver.h>
35#include <nvif/notify.h>
36#include <nvif/event.h>
37#include <nvif/ioctl.h>
38
39#include "nouveau_drm.h"
40#include "nouveau_usif.h"
41
42static void
43nvkm_client_unmap(void *priv, void *ptr, u32 size)
44{
45 iounmap(ptr);
46}
47
48static void *
49nvkm_client_map(void *priv, u64 handle, u32 size)
50{
51 return ioremap(handle, size);
52}
53
54static int
55nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
56{
57 return nvkm_ioctl(priv, super, data, size, hack);
58}
59
60static int
61nvkm_client_resume(void *priv)
62{
63 return nouveau_client_init(priv);
64}
65
66static int
67nvkm_client_suspend(void *priv)
68{
69 return nouveau_client_fini(priv, true);
70}
71
72static void
73nvkm_client_fini(void *priv)
74{
75 struct nouveau_object *client = priv;
76 nouveau_client_fini(nv_client(client), false);
77 atomic_set(&client->refcount, 1);
78 nouveau_object_ref(NULL, &client);
79}
80
81static int
82nvkm_client_ntfy(const void *header, u32 length, const void *data, u32 size)
83{
84 const union {
85 struct nvif_notify_req_v0 v0;
86 } *args = header;
87 u8 route;
88
89 if (length == sizeof(args->v0) && args->v0.version == 0) {
90 route = args->v0.route;
91 } else {
92 WARN_ON(1);
93 return NVKM_NOTIFY_DROP;
94 }
95
96 switch (route) {
97 case NVDRM_NOTIFY_NVIF:
98 return nvif_notify(header, length, data, size);
99 case NVDRM_NOTIFY_USIF:
100 return usif_notify(header, length, data, size);
101 default:
102 WARN_ON(1);
103 break;
104 }
105
106 return NVKM_NOTIFY_DROP;
107}
108
109static int
110nvkm_client_init(const char *name, u64 device, const char *cfg,
111 const char *dbg, void **ppriv)
112{
113 struct nouveau_client *client;
114 int ret;
115
116 ret = nouveau_client_create(name, device, cfg, dbg, &client);
117 *ppriv = client;
118 if (ret)
119 return ret;
120
121 client->ntfy = nvkm_client_ntfy;
122 return 0;
123}
124
125const struct nvif_driver
126nvif_driver_nvkm = {
127 .name = "nvkm",
128 .init = nvkm_client_init,
129 .fini = nvkm_client_fini,
130 .suspend = nvkm_client_suspend,
131 .resume = nvkm_client_resume,
132 .ioctl = nvkm_client_ioctl,
133 .map = nvkm_client_map,
134 .unmap = nvkm_client_unmap,
135 .keep = false,
136};
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
new file mode 100644
index 000000000000..0ffeb50d0088
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/clk.h>
24#include <linux/io.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27#include <linux/of.h>
28#include <linux/reset.h>
29#include <linux/regulator/consumer.h>
30#include <soc/tegra/pmc.h>
31
32#include "nouveau_drm.h"
33#include "nouveau_platform.h"
34
35static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
36{
37 int err;
38
39 err = regulator_enable(gpu->vdd);
40 if (err)
41 goto err_power;
42
43 err = clk_prepare_enable(gpu->clk);
44 if (err)
45 goto err_clk;
46 err = clk_prepare_enable(gpu->clk_pwr);
47 if (err)
48 goto err_clk_pwr;
49 clk_set_rate(gpu->clk_pwr, 204000000);
50 udelay(10);
51
52 reset_control_assert(gpu->rst);
53 udelay(10);
54
55 err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
56 if (err)
57 goto err_clamp;
58 udelay(10);
59
60 reset_control_deassert(gpu->rst);
61 udelay(10);
62
63 return 0;
64
65err_clamp:
66 clk_disable_unprepare(gpu->clk_pwr);
67err_clk_pwr:
68 clk_disable_unprepare(gpu->clk);
69err_clk:
70 regulator_disable(gpu->vdd);
71err_power:
72 return err;
73}
74
75static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
76{
77 int err;
78
79 reset_control_assert(gpu->rst);
80 udelay(10);
81
82 clk_disable_unprepare(gpu->clk_pwr);
83 clk_disable_unprepare(gpu->clk);
84 udelay(10);
85
86 err = regulator_disable(gpu->vdd);
87 if (err)
88 return err;
89
90 return 0;
91}
92
93static int nouveau_platform_probe(struct platform_device *pdev)
94{
95 struct nouveau_platform_gpu *gpu;
96 struct nouveau_platform_device *device;
97 struct drm_device *drm;
98 int err;
99
100 gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
101 if (!gpu)
102 return -ENOMEM;
103
104 gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
105 if (IS_ERR(gpu->vdd))
106 return PTR_ERR(gpu->vdd);
107
108 gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
109 if (IS_ERR(gpu->rst))
110 return PTR_ERR(gpu->rst);
111
112 gpu->clk = devm_clk_get(&pdev->dev, "gpu");
113 if (IS_ERR(gpu->clk))
114 return PTR_ERR(gpu->clk);
115
116 gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
117 if (IS_ERR(gpu->clk_pwr))
118 return PTR_ERR(gpu->clk_pwr);
119
120 err = nouveau_platform_power_up(gpu);
121 if (err)
122 return err;
123
124 drm = nouveau_platform_device_create(pdev, &device);
125 if (IS_ERR(drm)) {
126 err = PTR_ERR(drm);
127 goto power_down;
128 }
129
130 device->gpu = gpu;
131
132 err = drm_dev_register(drm, 0);
133 if (err < 0)
134 goto err_unref;
135
136 return 0;
137
138err_unref:
139 drm_dev_unref(drm);
140
141 return 0;
142
143power_down:
144 nouveau_platform_power_down(gpu);
145
146 return err;
147}
148
149static int nouveau_platform_remove(struct platform_device *pdev)
150{
151 struct drm_device *drm_dev = platform_get_drvdata(pdev);
152 struct nouveau_device *device = nouveau_dev(drm_dev);
153 struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
154
155 nouveau_drm_device_remove(drm_dev);
156
157 return nouveau_platform_power_down(gpu);
158}
159
160#if IS_ENABLED(CONFIG_OF)
161static const struct of_device_id nouveau_platform_match[] = {
162 { .compatible = "nvidia,gk20a" },
163 { }
164};
165
166MODULE_DEVICE_TABLE(of, nouveau_platform_match);
167#endif
168
169struct platform_driver nouveau_platform_driver = {
170 .driver = {
171 .name = "nouveau",
172 .of_match_table = of_match_ptr(nouveau_platform_match),
173 },
174 .probe = nouveau_platform_probe,
175 .remove = nouveau_platform_remove,
176};
177
178module_platform_driver(nouveau_platform_driver);
179
180MODULE_AUTHOR(DRIVER_AUTHOR);
181MODULE_DESCRIPTION(DRIVER_DESC);
182MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
new file mode 100644
index 000000000000..91f66504900e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NOUVEAU_PLATFORM_H__
24#define __NOUVEAU_PLATFORM_H__
25
26#include "core/device.h"
27
28struct reset_control;
29struct clk;
30struct regulator;
31
32struct nouveau_platform_gpu {
33 struct reset_control *rst;
34 struct clk *clk;
35 struct clk *clk_pwr;
36
37 struct regulator *vdd;
38};
39
40struct nouveau_platform_device {
41 struct nouveau_device device;
42
43 struct nouveau_platform_gpu *gpu;
44};
45
46#define nv_device_to_platform(d) \
47 container_of(d, struct nouveau_platform_device, device)
48
49#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a4d22e5eb176..01707e7deaf5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,8 +1,6 @@
1#include <linux/pagemap.h> 1#include <linux/pagemap.h>
2#include <linux/slab.h> 2#include <linux/slab.h>
3 3
4#include <subdev/fb.h>
5
6#include "nouveau_drm.h" 4#include "nouveau_drm.h"
7#include "nouveau_ttm.h" 5#include "nouveau_ttm.h"
8 6
@@ -104,7 +102,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
104 return NULL; 102 return NULL;
105 103
106 nvbe->dev = drm->dev; 104 nvbe->dev = drm->dev;
107 if (nv_device(drm->device)->card_type < NV_50) 105 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
108 nvbe->ttm.ttm.func = &nv04_sgdma_backend; 106 nvbe->ttm.ttm.func = &nv04_sgdma_backend;
109 else 107 else
110 nvbe->ttm.ttm.func = &nv50_sgdma_backend; 108 nvbe->ttm.ttm.func = &nv50_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 75dda2b07176..3c6962d15b26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -22,10 +22,15 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include <nvif/os.h>
26#include <nvif/class.h>
27#include <nvif/ioctl.h>
28
25#include "nouveau_sysfs.h" 29#include "nouveau_sysfs.h"
26 30
27#include <core/object.h> 31MODULE_PARM_DESC(pstate, "enable sysfs pstate file, which will be moved in the future");
28#include <core/class.h> 32static int nouveau_pstate;
33module_param_named(pstate, nouveau_pstate, int, 0400);
29 34
30static inline struct drm_device * 35static inline struct drm_device *
31drm_device(struct device *d) 36drm_device(struct device *d)
@@ -43,38 +48,42 @@ static ssize_t
43nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b) 48nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
44{ 49{
45 struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d)); 50 struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
46 struct nv_control_pstate_info info; 51 struct nvif_control_pstate_info_v0 info = {};
47 size_t cnt = PAGE_SIZE; 52 size_t cnt = PAGE_SIZE;
48 char *buf = b; 53 char *buf = b;
49 int ret, i; 54 int ret, i;
50 55
51 ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info)); 56 ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_INFO,
57 &info, sizeof(info));
52 if (ret) 58 if (ret)
53 return ret; 59 return ret;
54 60
55 for (i = 0; i < info.count + 1; i++) { 61 for (i = 0; i < info.count + 1; i++) {
56 const s32 state = i < info.count ? i : 62 const s32 state = i < info.count ? i :
57 NV_CONTROL_PSTATE_ATTR_STATE_CURRENT; 63 NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT;
58 struct nv_control_pstate_attr attr = { 64 struct nvif_control_pstate_attr_v0 attr = {
59 .state = state, 65 .state = state,
60 .index = 0, 66 .index = 0,
61 }; 67 };
62 68
63 ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR, 69 ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_ATTR,
64 &attr, sizeof(attr)); 70 &attr, sizeof(attr));
65 if (ret) 71 if (ret)
66 return ret; 72 return ret;
67 73
68 if (i < info.count) 74 if (i < info.count)
69 snappendf(buf, cnt, "%02x:", attr.state); 75 snappendf(buf, cnt, "%02x:", attr.state);
70 else 76 else
71 snappendf(buf, cnt, "--:"); 77 snappendf(buf, cnt, "%s:", info.pwrsrc == 0 ? "DC" :
78 info.pwrsrc == 1 ? "AC" :
79 "--");
72 80
73 attr.index = 0; 81 attr.index = 0;
74 do { 82 do {
75 attr.state = state; 83 attr.state = state;
76 ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR, 84 ret = nvif_mthd(&sysfs->ctrl,
77 &attr, sizeof(attr)); 85 NVIF_CONTROL_PSTATE_ATTR,
86 &attr, sizeof(attr));
78 if (ret) 87 if (ret)
79 return ret; 88 return ret;
80 89
@@ -84,9 +93,20 @@ nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
84 snappendf(buf, cnt, " %s", attr.unit); 93 snappendf(buf, cnt, " %s", attr.unit);
85 } while (attr.index); 94 } while (attr.index);
86 95
87 if ((state >= 0 && info.pstate == state) || 96 if (state >= 0) {
88 (state < 0 && info.ustate < 0)) 97 if (info.ustate_ac == state)
89 snappendf(buf, cnt, " *"); 98 snappendf(buf, cnt, " AC");
99 if (info.ustate_dc == state)
100 snappendf(buf, cnt, " DC");
101 if (info.pstate == state)
102 snappendf(buf, cnt, " *");
103 } else {
104 if (info.ustate_ac < -1)
105 snappendf(buf, cnt, " AC");
106 if (info.ustate_dc < -1)
107 snappendf(buf, cnt, " DC");
108 }
109
90 snappendf(buf, cnt, "\n"); 110 snappendf(buf, cnt, "\n");
91 } 111 }
92 112
@@ -98,26 +118,36 @@ nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a,
98 const char *buf, size_t count) 118 const char *buf, size_t count)
99{ 119{
100 struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d)); 120 struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
101 struct nv_control_pstate_user args; 121 struct nvif_control_pstate_user_v0 args = { .pwrsrc = -EINVAL };
102 long value, ret; 122 long value, ret;
103 char *tmp; 123 char *tmp;
104 124
105 if ((tmp = strchr(buf, '\n'))) 125 if ((tmp = strchr(buf, '\n')))
106 *tmp = '\0'; 126 *tmp = '\0';
107 127
128 if (!strncasecmp(buf, "dc:", 3)) {
129 args.pwrsrc = 0;
130 buf += 3;
131 } else
132 if (!strncasecmp(buf, "ac:", 3)) {
133 args.pwrsrc = 1;
134 buf += 3;
135 }
136
108 if (!strcasecmp(buf, "none")) 137 if (!strcasecmp(buf, "none"))
109 args.state = NV_CONTROL_PSTATE_USER_STATE_UNKNOWN; 138 args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN;
110 else 139 else
111 if (!strcasecmp(buf, "auto")) 140 if (!strcasecmp(buf, "auto"))
112 args.state = NV_CONTROL_PSTATE_USER_STATE_PERFMON; 141 args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON;
113 else { 142 else {
114 ret = kstrtol(buf, 16, &value); 143 ret = kstrtol(buf, 16, &value);
115 if (ret) 144 if (ret)
116 return ret; 145 return ret;
117 args.state = value; 146 args.ustate = value;
118 } 147 }
119 148
120 ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args)); 149 ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_USER,
150 &args, sizeof(args));
121 if (ret < 0) 151 if (ret < 0)
122 return ret; 152 return ret;
123 153
@@ -132,11 +162,11 @@ nouveau_sysfs_fini(struct drm_device *dev)
132{ 162{
133 struct nouveau_sysfs *sysfs = nouveau_sysfs(dev); 163 struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
134 struct nouveau_drm *drm = nouveau_drm(dev); 164 struct nouveau_drm *drm = nouveau_drm(dev);
135 struct nouveau_device *device = nv_device(drm->device); 165 struct nvif_device *device = &drm->device;
136 166
137 if (sysfs->ctrl) { 167 if (sysfs && sysfs->ctrl.priv) {
138 device_remove_file(nv_device_base(device), &dev_attr_pstate); 168 device_remove_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate);
139 nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL); 169 nvif_object_fini(&sysfs->ctrl);
140 } 170 }
141 171
142 drm->sysfs = NULL; 172 drm->sysfs = NULL;
@@ -147,18 +177,22 @@ int
147nouveau_sysfs_init(struct drm_device *dev) 177nouveau_sysfs_init(struct drm_device *dev)
148{ 178{
149 struct nouveau_drm *drm = nouveau_drm(dev); 179 struct nouveau_drm *drm = nouveau_drm(dev);
150 struct nouveau_device *device = nv_device(drm->device); 180 struct nvif_device *device = &drm->device;
151 struct nouveau_sysfs *sysfs; 181 struct nouveau_sysfs *sysfs;
152 int ret; 182 int ret;
153 183
184 if (!nouveau_pstate)
185 return 0;
186
154 sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL); 187 sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL);
155 if (!sysfs) 188 if (!sysfs)
156 return -ENOMEM; 189 return -ENOMEM;
157 190
158 ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL, 191 ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL,
159 NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl); 192 NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
193 &sysfs->ctrl);
160 if (ret == 0) 194 if (ret == 0)
161 device_create_file(nv_device_base(device), &dev_attr_pstate); 195 device_create_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate);
162 196
163 return 0; 197 return 0;
164} 198}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
index 74b47f1e01ed..f973378160f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
@@ -4,7 +4,7 @@
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5 5
6struct nouveau_sysfs { 6struct nouveau_sysfs {
7 struct nouveau_object *ctrl; 7 struct nvif_object ctrl;
8}; 8};
9 9
10static inline struct nouveau_sysfs * 10static inline struct nouveau_sysfs *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7e185c122750..53874b76b031 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,10 +24,6 @@
24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */ 25 */
26 26
27#include <subdev/fb.h>
28#include <subdev/vm.h>
29#include <subdev/instmem.h>
30
31#include "nouveau_drm.h" 27#include "nouveau_drm.h"
32#include "nouveau_ttm.h" 28#include "nouveau_ttm.h"
33#include "nouveau_gem.h" 29#include "nouveau_gem.h"
@@ -36,7 +32,7 @@ static int
36nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) 32nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
37{ 33{
38 struct nouveau_drm *drm = nouveau_bdev(man->bdev); 34 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
39 struct nouveau_fb *pfb = nouveau_fb(drm->device); 35 struct nouveau_fb *pfb = nvkm_fb(&drm->device);
40 man->priv = pfb; 36 man->priv = pfb;
41 return 0; 37 return 0;
42} 38}
@@ -67,7 +63,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
67 struct ttm_mem_reg *mem) 63 struct ttm_mem_reg *mem)
68{ 64{
69 struct nouveau_drm *drm = nouveau_bdev(man->bdev); 65 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
70 struct nouveau_fb *pfb = nouveau_fb(drm->device); 66 struct nouveau_fb *pfb = nvkm_fb(&drm->device);
71 nouveau_mem_node_cleanup(mem->mm_node); 67 nouveau_mem_node_cleanup(mem->mm_node);
72 pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node); 68 pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
73} 69}
@@ -80,7 +76,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
80 struct ttm_mem_reg *mem) 76 struct ttm_mem_reg *mem)
81{ 77{
82 struct nouveau_drm *drm = nouveau_bdev(man->bdev); 78 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
83 struct nouveau_fb *pfb = nouveau_fb(drm->device); 79 struct nouveau_fb *pfb = nvkm_fb(&drm->device);
84 struct nouveau_bo *nvbo = nouveau_bo(bo); 80 struct nouveau_bo *nvbo = nouveau_bo(bo);
85 struct nouveau_mem *node; 81 struct nouveau_mem *node;
86 u32 size_nc = 0; 82 u32 size_nc = 0;
@@ -176,14 +172,13 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
176 172
177 node->page_shift = 12; 173 node->page_shift = 12;
178 174
179 switch (nv_device(drm->device)->card_type) { 175 switch (drm->device.info.family) {
180 case NV_50: 176 case NV_DEVICE_INFO_V0_TESLA:
181 if (nv_device(drm->device)->chipset != 0x50) 177 if (drm->device.info.chipset != 0x50)
182 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; 178 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
183 break; 179 break;
184 case NV_C0: 180 case NV_DEVICE_INFO_V0_FERMI:
185 case NV_D0: 181 case NV_DEVICE_INFO_V0_KEPLER:
186 case NV_E0:
187 node->memtype = (nvbo->tile_flags & 0xff00) >> 8; 182 node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
188 break; 183 break;
189 default: 184 default:
@@ -208,12 +203,13 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
208 nouveau_gart_manager_debug 203 nouveau_gart_manager_debug
209}; 204};
210 205
206/*XXX*/
211#include <core/subdev/vm/nv04.h> 207#include <core/subdev/vm/nv04.h>
212static int 208static int
213nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) 209nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
214{ 210{
215 struct nouveau_drm *drm = nouveau_bdev(man->bdev); 211 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
216 struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device); 212 struct nouveau_vmmgr *vmm = nvkm_vmmgr(&drm->device);
217 struct nv04_vmmgr_priv *priv = (void *)vmm; 213 struct nv04_vmmgr_priv *priv = (void *)vmm;
218 struct nouveau_vm *vm = NULL; 214 struct nouveau_vm *vm = NULL;
219 nouveau_vm_ref(priv->vm, &vm, NULL); 215 nouveau_vm_ref(priv->vm, &vm, NULL);
@@ -357,12 +353,11 @@ int
357nouveau_ttm_init(struct nouveau_drm *drm) 353nouveau_ttm_init(struct nouveau_drm *drm)
358{ 354{
359 struct drm_device *dev = drm->dev; 355 struct drm_device *dev = drm->dev;
360 struct nouveau_device *device = nv_device(drm->device);
361 u32 bits; 356 u32 bits;
362 int ret; 357 int ret;
363 358
364 bits = nouveau_vmmgr(drm->device)->dma_bits; 359 bits = nvkm_vmmgr(&drm->device)->dma_bits;
365 if (nv_device_is_pci(device)) { 360 if (nv_device_is_pci(nvkm_device(&drm->device))) {
366 if (drm->agp.stat == ENABLED || 361 if (drm->agp.stat == ENABLED ||
367 !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits))) 362 !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
368 bits = 32; 363 bits = 32;
@@ -394,8 +389,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
394 } 389 }
395 390
396 /* VRAM init */ 391 /* VRAM init */
397 drm->gem.vram_available = nouveau_fb(drm->device)->ram->size; 392 drm->gem.vram_available = drm->device.info.ram_user;
398 drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
399 393
400 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, 394 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
401 drm->gem.vram_available >> PAGE_SHIFT); 395 drm->gem.vram_available >> PAGE_SHIFT);
@@ -404,12 +398,12 @@ nouveau_ttm_init(struct nouveau_drm *drm)
404 return ret; 398 return ret;
405 } 399 }
406 400
407 drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(device, 1), 401 drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvkm_device(&drm->device), 1),
408 nv_device_resource_len(device, 1)); 402 nv_device_resource_len(nvkm_device(&drm->device), 1));
409 403
410 /* GART init */ 404 /* GART init */
411 if (drm->agp.stat != ENABLED) { 405 if (drm->agp.stat != ENABLED) {
412 drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit; 406 drm->gem.gart_available = nvkm_vmmgr(&drm->device)->limit;
413 } else { 407 } else {
414 drm->gem.gart_available = drm->agp.size; 408 drm->gem.gart_available = drm->agp.size;
415 } 409 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
new file mode 100644
index 000000000000..cb1182d7e80e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -0,0 +1,384 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "nouveau_drm.h"
26#include "nouveau_usif.h"
27
28#include <nvif/notify.h>
29#include <nvif/unpack.h>
30#include <nvif/client.h>
31#include <nvif/event.h>
32#include <nvif/ioctl.h>
33
34struct usif_notify_p {
35 struct drm_pending_event base;
36 struct {
37 struct drm_event base;
38 u8 data[];
39 } e;
40};
41
42struct usif_notify {
43 struct list_head head;
44 atomic_t enabled;
45 u32 handle;
46 u16 reply;
47 u8 route;
48 u64 token;
49 struct usif_notify_p *p;
50};
51
52static inline struct usif_notify *
53usif_notify_find(struct drm_file *filp, u32 handle)
54{
55 struct nouveau_cli *cli = nouveau_cli(filp);
56 struct usif_notify *ntfy;
57 list_for_each_entry(ntfy, &cli->notifys, head) {
58 if (ntfy->handle == handle)
59 return ntfy;
60 }
61 return NULL;
62}
63
64static inline void
65usif_notify_dtor(struct usif_notify *ntfy)
66{
67 list_del(&ntfy->head);
68 kfree(ntfy);
69}
70
71int
72usif_notify(const void *header, u32 length, const void *data, u32 size)
73{
74 struct usif_notify *ntfy = NULL;
75 const union {
76 struct nvif_notify_rep_v0 v0;
77 } *rep = header;
78 struct drm_device *dev;
79 struct drm_file *filp;
80 unsigned long flags;
81
82 if (length == sizeof(rep->v0) && rep->v0.version == 0) {
83 if (WARN_ON(!(ntfy = (void *)(unsigned long)rep->v0.token)))
84 return NVIF_NOTIFY_DROP;
85 BUG_ON(rep->v0.route != NVDRM_NOTIFY_USIF);
86 } else
87 if (WARN_ON(1))
88 return NVIF_NOTIFY_DROP;
89
90 if (WARN_ON(!ntfy->p || ntfy->reply != (length + size)))
91 return NVIF_NOTIFY_DROP;
92 filp = ntfy->p->base.file_priv;
93 dev = filp->minor->dev;
94
95 memcpy(&ntfy->p->e.data[0], header, length);
96 memcpy(&ntfy->p->e.data[length], data, size);
97 switch (rep->v0.version) {
98 case 0: {
99 struct nvif_notify_rep_v0 *rep = (void *)ntfy->p->e.data;
100 rep->route = ntfy->route;
101 rep->token = ntfy->token;
102 }
103 break;
104 default:
105 BUG_ON(1);
106 break;
107 }
108
109 spin_lock_irqsave(&dev->event_lock, flags);
110 if (!WARN_ON(filp->event_space < ntfy->p->e.base.length)) {
111 list_add_tail(&ntfy->p->base.link, &filp->event_list);
112 filp->event_space -= ntfy->p->e.base.length;
113 }
114 wake_up_interruptible(&filp->event_wait);
115 spin_unlock_irqrestore(&dev->event_lock, flags);
116 atomic_set(&ntfy->enabled, 0);
117 return NVIF_NOTIFY_DROP;
118}
119
120static int
121usif_notify_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
122{
123 struct nouveau_cli *cli = nouveau_cli(f);
124 struct nvif_client *client = &cli->base;
125 union {
126 struct nvif_ioctl_ntfy_new_v0 v0;
127 } *args = data;
128 union {
129 struct nvif_notify_req_v0 v0;
130 } *req;
131 struct usif_notify *ntfy;
132 int ret;
133
134 if (nvif_unpack(args->v0, 0, 0, true)) {
135 if (usif_notify_find(f, args->v0.index))
136 return -EEXIST;
137 } else
138 return ret;
139 req = data;
140
141 if (!(ntfy = kmalloc(sizeof(*ntfy), GFP_KERNEL)))
142 return -ENOMEM;
143 atomic_set(&ntfy->enabled, 0);
144
145 if (nvif_unpack(req->v0, 0, 0, true)) {
146 ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply;
147 ntfy->route = req->v0.route;
148 ntfy->token = req->v0.token;
149 req->v0.route = NVDRM_NOTIFY_USIF;
150 req->v0.token = (unsigned long)(void *)ntfy;
151 ret = nvif_client_ioctl(client, argv, argc);
152 req->v0.token = ntfy->token;
153 req->v0.route = ntfy->route;
154 ntfy->handle = args->v0.index;
155 }
156
157 if (ret == 0)
158 list_add(&ntfy->head, &cli->notifys);
159 if (ret)
160 kfree(ntfy);
161 return ret;
162}
163
164static int
165usif_notify_del(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
166{
167 struct nouveau_cli *cli = nouveau_cli(f);
168 struct nvif_client *client = &cli->base;
169 union {
170 struct nvif_ioctl_ntfy_del_v0 v0;
171 } *args = data;
172 struct usif_notify *ntfy;
173 int ret;
174
175 if (nvif_unpack(args->v0, 0, 0, true)) {
176 if (!(ntfy = usif_notify_find(f, args->v0.index)))
177 return -ENOENT;
178 } else
179 return ret;
180
181 ret = nvif_client_ioctl(client, argv, argc);
182 if (ret == 0)
183 usif_notify_dtor(ntfy);
184 return ret;
185}
186
187static int
188usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
189{
190 struct nouveau_cli *cli = nouveau_cli(f);
191 struct nvif_client *client = &cli->base;
192 union {
193 struct nvif_ioctl_ntfy_del_v0 v0;
194 } *args = data;
195 struct usif_notify *ntfy;
196 int ret;
197
198 if (nvif_unpack(args->v0, 0, 0, true)) {
199 if (!(ntfy = usif_notify_find(f, args->v0.index)))
200 return -ENOENT;
201 } else
202 return ret;
203
204 if (atomic_xchg(&ntfy->enabled, 1))
205 return 0;
206
207 ntfy->p = kmalloc(sizeof(*ntfy->p) + ntfy->reply, GFP_KERNEL);
208 if (ret = -ENOMEM, !ntfy->p)
209 goto done;
210 ntfy->p->base.event = &ntfy->p->e.base;
211 ntfy->p->base.file_priv = f;
212 ntfy->p->base.pid = current->pid;
213 ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree;
214 ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
215 ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
216
217 ret = nvif_client_ioctl(client, argv, argc);
218done:
219 if (ret) {
220 atomic_set(&ntfy->enabled, 0);
221 kfree(ntfy->p);
222 }
223 return ret;
224}
225
226static int
227usif_notify_put(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
228{
229 struct nouveau_cli *cli = nouveau_cli(f);
230 struct nvif_client *client = &cli->base;
231 union {
232 struct nvif_ioctl_ntfy_put_v0 v0;
233 } *args = data;
234 struct usif_notify *ntfy;
235 int ret;
236
237 if (nvif_unpack(args->v0, 0, 0, true)) {
238 if (!(ntfy = usif_notify_find(f, args->v0.index)))
239 return -ENOENT;
240 } else
241 return ret;
242
243 ret = nvif_client_ioctl(client, argv, argc);
244 if (ret == 0 && atomic_xchg(&ntfy->enabled, 0))
245 kfree(ntfy->p);
246 return ret;
247}
248
249struct usif_object {
250 struct list_head head;
251 struct list_head ntfy;
252 u8 route;
253 u64 token;
254};
255
256static void
257usif_object_dtor(struct usif_object *object)
258{
259 list_del(&object->head);
260 kfree(object);
261}
262
263static int
264usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
265{
266 struct nouveau_cli *cli = nouveau_cli(f);
267 struct nvif_client *client = &cli->base;
268 union {
269 struct nvif_ioctl_new_v0 v0;
270 } *args = data;
271 struct usif_object *object;
272 int ret;
273
274 if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
275 return -ENOMEM;
276 list_add(&object->head, &cli->objects);
277
278 if (nvif_unpack(args->v0, 0, 0, true)) {
279 object->route = args->v0.route;
280 object->token = args->v0.token;
281 args->v0.route = NVDRM_OBJECT_USIF;
282 args->v0.token = (unsigned long)(void *)object;
283 ret = nvif_client_ioctl(client, argv, argc);
284 args->v0.token = object->token;
285 args->v0.route = object->route;
286 }
287
288 if (ret)
289 usif_object_dtor(object);
290 return ret;
291}
292
293int
294usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
295{
296 struct nouveau_cli *cli = nouveau_cli(filp);
297 struct nvif_client *client = &cli->base;
298 void *data = kmalloc(argc, GFP_KERNEL);
299 u32 size = argc;
300 union {
301 struct nvif_ioctl_v0 v0;
302 } *argv = data;
303 struct usif_object *object;
304 u8 owner;
305 int ret;
306
307 if (ret = -ENOMEM, !argv)
308 goto done;
309 if (ret = -EFAULT, copy_from_user(argv, user, size))
310 goto done;
311
312 if (nvif_unpack(argv->v0, 0, 0, true)) {
313 /* block access to objects not created via this interface */
314 owner = argv->v0.owner;
315 argv->v0.owner = NVDRM_OBJECT_USIF;
316 } else
317 goto done;
318
319 mutex_lock(&cli->mutex);
320 switch (argv->v0.type) {
321 case NVIF_IOCTL_V0_NEW:
322 /* ... except if we're creating children */
323 argv->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
324 ret = usif_object_new(filp, data, size, argv, argc);
325 break;
326 case NVIF_IOCTL_V0_NTFY_NEW:
327 ret = usif_notify_new(filp, data, size, argv, argc);
328 break;
329 case NVIF_IOCTL_V0_NTFY_DEL:
330 ret = usif_notify_del(filp, data, size, argv, argc);
331 break;
332 case NVIF_IOCTL_V0_NTFY_GET:
333 ret = usif_notify_get(filp, data, size, argv, argc);
334 break;
335 case NVIF_IOCTL_V0_NTFY_PUT:
336 ret = usif_notify_put(filp, data, size, argv, argc);
337 break;
338 default:
339 ret = nvif_client_ioctl(client, argv, argc);
340 break;
341 }
342 if (argv->v0.route == NVDRM_OBJECT_USIF) {
343 object = (void *)(unsigned long)argv->v0.token;
344 argv->v0.route = object->route;
345 argv->v0.token = object->token;
346 if (ret == 0 && argv->v0.type == NVIF_IOCTL_V0_DEL) {
347 list_del(&object->head);
348 kfree(object);
349 }
350 } else {
351 argv->v0.route = NVIF_IOCTL_V0_ROUTE_HIDDEN;
352 argv->v0.token = 0;
353 }
354 argv->v0.owner = owner;
355 mutex_unlock(&cli->mutex);
356
357 if (copy_to_user(user, argv, argc))
358 ret = -EFAULT;
359done:
360 kfree(argv);
361 return ret;
362}
363
364void
365usif_client_fini(struct nouveau_cli *cli)
366{
367 struct usif_object *object, *otemp;
368 struct usif_notify *notify, *ntemp;
369
370 list_for_each_entry_safe(notify, ntemp, &cli->notifys, head) {
371 usif_notify_dtor(notify);
372 }
373
374 list_for_each_entry_safe(object, otemp, &cli->objects, head) {
375 usif_object_dtor(object);
376 }
377}
378
379void
380usif_client_init(struct nouveau_cli *cli)
381{
382 INIT_LIST_HEAD(&cli->objects);
383 INIT_LIST_HEAD(&cli->notifys);
384}
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h
new file mode 100644
index 000000000000..c037e3ae8c70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.h
@@ -0,0 +1,9 @@
1#ifndef __NOUVEAU_USIF_H__
2#define __NOUVEAU_USIF_H__
3
4void usif_client_init(struct nouveau_cli *);
5void usif_client_fini(struct nouveau_cli *);
6int usif_ioctl(struct drm_file *, void __user *, u32);
7int usif_notify(const void *, u32, const void *, u32);
8
9#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 4f4c3fec6916..18d55d447248 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -12,14 +12,16 @@
12static unsigned int 12static unsigned int
13nouveau_vga_set_decode(void *priv, bool state) 13nouveau_vga_set_decode(void *priv, bool state)
14{ 14{
15 struct nouveau_device *device = nouveau_dev(priv); 15 struct nvif_device *device = &nouveau_drm(priv)->device;
16 16
17 if (device->card_type == NV_40 && device->chipset >= 0x4c) 17 if (device->info.family == NV_DEVICE_INFO_V0_CURIE &&
18 nv_wr32(device, 0x088060, state); 18 device->info.chipset >= 0x4c)
19 else if (device->chipset >= 0x40) 19 nvif_wr32(device, 0x088060, state);
20 nv_wr32(device, 0x088054, state);
21 else 20 else
22 nv_wr32(device, 0x001854, state); 21 if (device->info.chipset >= 0x40)
22 nvif_wr32(device, 0x088054, state);
23 else
24 nvif_wr32(device, 0x001854, state);
23 25
24 if (state) 26 if (state)
25 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 27 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 8fe32bbed99a..4ef602c5469d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -22,8 +22,6 @@
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24 24
25#include <core/object.h>
26
27#include "nouveau_drm.h" 25#include "nouveau_drm.h"
28#include "nouveau_dma.h" 26#include "nouveau_dma.h"
29#include "nouveau_fbcon.h" 27#include "nouveau_fbcon.h"
@@ -141,8 +139,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
141 struct drm_device *dev = nfbdev->dev; 139 struct drm_device *dev = nfbdev->dev;
142 struct nouveau_drm *drm = nouveau_drm(dev); 140 struct nouveau_drm *drm = nouveau_drm(dev);
143 struct nouveau_channel *chan = drm->channel; 141 struct nouveau_channel *chan = drm->channel;
144 struct nouveau_device *device = nv_device(drm->device); 142 struct nvif_device *device = &drm->device;
145 struct nouveau_object *object;
146 int surface_fmt, pattern_fmt, rect_fmt; 143 int surface_fmt, pattern_fmt, rect_fmt;
147 int ret; 144 int ret;
148 145
@@ -174,35 +171,35 @@ nv04_fbcon_accel_init(struct fb_info *info)
174 return -EINVAL; 171 return -EINVAL;
175 } 172 }
176 173
177 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D, 174 ret = nvif_object_init(chan->object, NULL, 0x0062,
178 device->card_type >= NV_10 ? 0x0062 : 0x0042, 175 device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ?
179 NULL, 0, &object); 176 0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d);
180 if (ret) 177 if (ret)
181 return ret; 178 return ret;
182 179
183 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect, 180 ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0,
184 0x0019, NULL, 0, &object); 181 &nfbdev->clip);
185 if (ret) 182 if (ret)
186 return ret; 183 return ret;
187 184
188 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop, 185 ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0,
189 0x0043, NULL, 0, &object); 186 &nfbdev->rop);
190 if (ret) 187 if (ret)
191 return ret; 188 return ret;
192 189
193 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt, 190 ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0,
194 0x0044, NULL, 0, &object); 191 &nfbdev->patt);
195 if (ret) 192 if (ret)
196 return ret; 193 return ret;
197 194
198 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect, 195 ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0,
199 0x004a, NULL, 0, &object); 196 &nfbdev->gdi);
200 if (ret) 197 if (ret)
201 return ret; 198 return ret;
202 199
203 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit, 200 ret = nvif_object_init(chan->object, NULL, 0x005f,
204 device->chipset >= 0x11 ? 0x009f : 0x005f, 201 device->info.chipset >= 0x11 ? 0x009f : 0x005f,
205 NULL, 0, &object); 202 NULL, 0, &nfbdev->blit);
206 if (ret) 203 if (ret)
207 return ret; 204 return ret;
208 205
@@ -212,10 +209,10 @@ nv04_fbcon_accel_init(struct fb_info *info)
212 } 209 }
213 210
214 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); 211 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
215 OUT_RING(chan, NvCtxSurf2D); 212 OUT_RING(chan, nfbdev->surf2d.handle);
216 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2); 213 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
217 OUT_RING(chan, NvDmaFB); 214 OUT_RING(chan, chan->vram.handle);
218 OUT_RING(chan, NvDmaFB); 215 OUT_RING(chan, chan->vram.handle);
219 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4); 216 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
220 OUT_RING(chan, surface_fmt); 217 OUT_RING(chan, surface_fmt);
221 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); 218 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
@@ -223,12 +220,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
223 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); 220 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
224 221
225 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); 222 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
226 OUT_RING(chan, NvRop); 223 OUT_RING(chan, nfbdev->rop.handle);
227 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1); 224 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
228 OUT_RING(chan, 0x55); 225 OUT_RING(chan, 0x55);
229 226
230 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); 227 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
231 OUT_RING(chan, NvImagePatt); 228 OUT_RING(chan, nfbdev->patt.handle);
232 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8); 229 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
233 OUT_RING(chan, pattern_fmt); 230 OUT_RING(chan, pattern_fmt);
234#ifdef __BIG_ENDIAN 231#ifdef __BIG_ENDIAN
@@ -244,18 +241,18 @@ nv04_fbcon_accel_init(struct fb_info *info)
244 OUT_RING(chan, ~0); 241 OUT_RING(chan, ~0);
245 242
246 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); 243 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
247 OUT_RING(chan, NvClipRect); 244 OUT_RING(chan, nfbdev->clip.handle);
248 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2); 245 BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
249 OUT_RING(chan, 0); 246 OUT_RING(chan, 0);
250 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); 247 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
251 248
252 BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1); 249 BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
253 OUT_RING(chan, NvImageBlit); 250 OUT_RING(chan, nfbdev->blit.handle);
254 BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1); 251 BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
255 OUT_RING(chan, NvCtxSurf2D); 252 OUT_RING(chan, nfbdev->surf2d.handle);
256 BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1); 253 BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
257 OUT_RING(chan, 3); 254 OUT_RING(chan, 3);
258 if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) { 255 if (device->info.chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
259 BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3); 256 BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
260 OUT_RING(chan, 0); 257 OUT_RING(chan, 0);
261 OUT_RING(chan, 1); 258 OUT_RING(chan, 1);
@@ -263,12 +260,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
263 } 260 }
264 261
265 BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1); 262 BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
266 OUT_RING(chan, NvGdiRect); 263 OUT_RING(chan, nfbdev->gdi.handle);
267 BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1); 264 BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
268 OUT_RING(chan, NvCtxSurf2D); 265 OUT_RING(chan, nfbdev->surf2d.handle);
269 BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2); 266 BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
270 OUT_RING(chan, NvImagePatt); 267 OUT_RING(chan, nfbdev->patt.handle);
271 OUT_RING(chan, NvRop); 268 OUT_RING(chan, nfbdev->rop.handle);
272 BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1); 269 BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
273 OUT_RING(chan, 1); 270 OUT_RING(chan, 1);
274 BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1); 271 BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 94eadd1dd10a..239c2c5a9615 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -22,8 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <engine/fifo.h>
26
27#include "nouveau_drm.h" 25#include "nouveau_drm.h"
28#include "nouveau_dma.h" 26#include "nouveau_dma.h"
29#include "nouveau_fence.h" 27#include "nouveau_fence.h"
@@ -59,7 +57,7 @@ nv04_fence_sync(struct nouveau_fence *fence,
59static u32 57static u32
60nv04_fence_read(struct nouveau_channel *chan) 58nv04_fence_read(struct nouveau_channel *chan)
61{ 59{
62 struct nouveau_fifo_chan *fifo = (void *)chan->object; 60 struct nouveau_fifo_chan *fifo = nvkm_fifo_chan(chan);;
63 return atomic_read(&fifo->refcnt); 61 return atomic_read(&fifo->refcnt);
64} 62}
65 63
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 06f434f03fba..4faaf0acf5d7 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -22,9 +22,6 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include <core/object.h>
26#include <core/class.h>
27
28#include "nouveau_drm.h" 25#include "nouveau_drm.h"
29#include "nouveau_dma.h" 26#include "nouveau_dma.h"
30#include "nv10_fence.h" 27#include "nv10_fence.h"
@@ -53,14 +50,18 @@ nv10_fence_sync(struct nouveau_fence *fence,
53u32 50u32
54nv10_fence_read(struct nouveau_channel *chan) 51nv10_fence_read(struct nouveau_channel *chan)
55{ 52{
56 return nv_ro32(chan->object, 0x0048); 53 return nvif_rd32(chan, 0x0048);
57} 54}
58 55
59void 56void
60nv10_fence_context_del(struct nouveau_channel *chan) 57nv10_fence_context_del(struct nouveau_channel *chan)
61{ 58{
62 struct nv10_fence_chan *fctx = chan->fence; 59 struct nv10_fence_chan *fctx = chan->fence;
60 int i;
63 nouveau_fence_context_del(&fctx->base); 61 nouveau_fence_context_del(&fctx->base);
62 for (i = 0; i < ARRAY_SIZE(fctx->head); i++)
63 nvif_object_fini(&fctx->head[i]);
64 nvif_object_fini(&fctx->sema);
64 chan->fence = NULL; 65 chan->fence = NULL;
65 kfree(fctx); 66 kfree(fctx);
66} 67}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index e5d9204826c2..a87259f3983a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -1,12 +1,13 @@
1#ifndef __NV10_FENCE_H_ 1#ifndef __NV10_FENCE_H_
2#define __NV10_FENCE_H_ 2#define __NV10_FENCE_H_
3 3
4#include <core/os.h>
5#include "nouveau_fence.h" 4#include "nouveau_fence.h"
6#include "nouveau_bo.h" 5#include "nouveau_bo.h"
7 6
8struct nv10_fence_chan { 7struct nv10_fence_chan {
9 struct nouveau_fence_chan base; 8 struct nouveau_fence_chan base;
9 struct nvif_object sema;
10 struct nvif_object head[4];
10}; 11};
11 12
12struct nv10_fence_priv { 13struct nv10_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 22aa9963ea6f..ca907479f92f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -22,8 +22,8 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include <core/object.h> 25#include <nvif/os.h>
26#include <core/class.h> 26#include <nvif/class.h>
27 27
28#include "nouveau_drm.h" 28#include "nouveau_drm.h"
29#include "nouveau_dma.h" 29#include "nouveau_dma.h"
@@ -33,11 +33,13 @@ int
33nv17_fence_sync(struct nouveau_fence *fence, 33nv17_fence_sync(struct nouveau_fence *fence,
34 struct nouveau_channel *prev, struct nouveau_channel *chan) 34 struct nouveau_channel *prev, struct nouveau_channel *chan)
35{ 35{
36 struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base);
36 struct nv10_fence_priv *priv = chan->drm->fence; 37 struct nv10_fence_priv *priv = chan->drm->fence;
38 struct nv10_fence_chan *fctx = chan->fence;
37 u32 value; 39 u32 value;
38 int ret; 40 int ret;
39 41
40 if (!mutex_trylock(&prev->cli->mutex)) 42 if (!mutex_trylock(&cli->mutex))
41 return -EBUSY; 43 return -EBUSY;
42 44
43 spin_lock(&priv->lock); 45 spin_lock(&priv->lock);
@@ -48,7 +50,7 @@ nv17_fence_sync(struct nouveau_fence *fence,
48 ret = RING_SPACE(prev, 5); 50 ret = RING_SPACE(prev, 5);
49 if (!ret) { 51 if (!ret) {
50 BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4); 52 BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
51 OUT_RING (prev, NvSema); 53 OUT_RING (prev, fctx->sema.handle);
52 OUT_RING (prev, 0); 54 OUT_RING (prev, 0);
53 OUT_RING (prev, value + 0); 55 OUT_RING (prev, value + 0);
54 OUT_RING (prev, value + 1); 56 OUT_RING (prev, value + 1);
@@ -57,14 +59,14 @@ nv17_fence_sync(struct nouveau_fence *fence,
57 59
58 if (!ret && !(ret = RING_SPACE(chan, 5))) { 60 if (!ret && !(ret = RING_SPACE(chan, 5))) {
59 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4); 61 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
60 OUT_RING (chan, NvSema); 62 OUT_RING (chan, fctx->sema.handle);
61 OUT_RING (chan, 0); 63 OUT_RING (chan, 0);
62 OUT_RING (chan, value + 1); 64 OUT_RING (chan, value + 1);
63 OUT_RING (chan, value + 2); 65 OUT_RING (chan, value + 2);
64 FIRE_RING (chan); 66 FIRE_RING (chan);
65 } 67 }
66 68
67 mutex_unlock(&prev->cli->mutex); 69 mutex_unlock(&cli->mutex);
68 return 0; 70 return 0;
69} 71}
70 72
@@ -74,7 +76,6 @@ nv17_fence_context_new(struct nouveau_channel *chan)
74 struct nv10_fence_priv *priv = chan->drm->fence; 76 struct nv10_fence_priv *priv = chan->drm->fence;
75 struct nv10_fence_chan *fctx; 77 struct nv10_fence_chan *fctx;
76 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 78 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
77 struct nouveau_object *object;
78 u32 start = mem->start * PAGE_SIZE; 79 u32 start = mem->start * PAGE_SIZE;
79 u32 limit = start + mem->size - 1; 80 u32 limit = start + mem->size - 1;
80 int ret = 0; 81 int ret = 0;
@@ -88,15 +89,14 @@ nv17_fence_context_new(struct nouveau_channel *chan)
88 fctx->base.read = nv10_fence_read; 89 fctx->base.read = nv10_fence_read;
89 fctx->base.sync = nv17_fence_sync; 90 fctx->base.sync = nv17_fence_sync;
90 91
91 ret = nouveau_object_new(nv_object(chan->cli), chan->handle, 92 ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY,
92 NvSema, 0x0002, 93 &(struct nv_dma_v0) {
93 &(struct nv_dma_class) { 94 .target = NV_DMA_V0_TARGET_VRAM,
94 .flags = NV_DMA_TARGET_VRAM | 95 .access = NV_DMA_V0_ACCESS_RDWR,
95 NV_DMA_ACCESS_RDWR,
96 .start = start, 96 .start = start,
97 .limit = limit, 97 .limit = limit,
98 }, sizeof(struct nv_dma_class), 98 }, sizeof(struct nv_dma_v0),
99 &object); 99 &fctx->sema);
100 if (ret) 100 if (ret)
101 nv10_fence_context_del(chan); 101 nv10_fence_context_del(chan);
102 return ret; 102 return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 4c534b7b04da..03949eaa629f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -28,6 +28,8 @@
28#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
29#include <drm/drm_dp_helper.h> 29#include <drm/drm_dp_helper.h>
30 30
31#include <nvif/class.h>
32
31#include "nouveau_drm.h" 33#include "nouveau_drm.h"
32#include "nouveau_dma.h" 34#include "nouveau_dma.h"
33#include "nouveau_gem.h" 35#include "nouveau_gem.h"
@@ -37,15 +39,6 @@
37#include "nouveau_fence.h" 39#include "nouveau_fence.h"
38#include "nv50_display.h" 40#include "nv50_display.h"
39 41
40#include <core/client.h>
41#include <core/gpuobj.h>
42#include <core/class.h>
43
44#include <subdev/timer.h>
45#include <subdev/bar.h>
46#include <subdev/fb.h>
47#include <subdev/i2c.h>
48
49#define EVO_DMA_NR 9 42#define EVO_DMA_NR 9
50 43
51#define EVO_MASTER (0x00) 44#define EVO_MASTER (0x00)
@@ -60,45 +53,34 @@
60#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00) 53#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
61#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10) 54#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
62 55
63#define EVO_CORE_HANDLE (0xd1500000)
64#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
65#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
66#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \
67 (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
68
69/****************************************************************************** 56/******************************************************************************
70 * EVO channel 57 * EVO channel
71 *****************************************************************************/ 58 *****************************************************************************/
72 59
73struct nv50_chan { 60struct nv50_chan {
74 struct nouveau_object *user; 61 struct nvif_object user;
75 u32 handle;
76}; 62};
77 63
78static int 64static int
79nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head, 65nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
80 void *data, u32 size, struct nv50_chan *chan) 66 void *data, u32 size, struct nv50_chan *chan)
81{ 67{
82 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); 68 while (oclass[0]) {
83 const u32 oclass = EVO_CHAN_OCLASS(bclass, core); 69 int ret = nvif_object_init(disp, NULL, (oclass[0] << 16) | head,
84 const u32 handle = EVO_CHAN_HANDLE(bclass, head); 70 oclass[0], data, size,
85 int ret; 71 &chan->user);
86 72 if (oclass++, ret == 0) {
87 ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle, 73 nvif_object_map(&chan->user);
88 oclass, data, size, &chan->user); 74 return ret;
89 if (ret) 75 }
90 return ret; 76 }
91 77 return -ENOSYS;
92 chan->handle = handle;
93 return 0;
94} 78}
95 79
96static void 80static void
97nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan) 81nv50_chan_destroy(struct nv50_chan *chan)
98{ 82{
99 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); 83 nvif_object_fini(&chan->user);
100 if (chan->handle)
101 nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
102} 84}
103 85
104/****************************************************************************** 86/******************************************************************************
@@ -110,16 +92,70 @@ struct nv50_pioc {
110}; 92};
111 93
112static void 94static void
113nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc) 95nv50_pioc_destroy(struct nv50_pioc *pioc)
114{ 96{
115 nv50_chan_destroy(core, &pioc->base); 97 nv50_chan_destroy(&pioc->base);
116} 98}
117 99
118static int 100static int
119nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head, 101nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
120 void *data, u32 size, struct nv50_pioc *pioc) 102 void *data, u32 size, struct nv50_pioc *pioc)
121{ 103{
122 return nv50_chan_create(core, bclass, head, data, size, &pioc->base); 104 return nv50_chan_create(disp, oclass, head, data, size, &pioc->base);
105}
106
107/******************************************************************************
108 * Cursor Immediate
109 *****************************************************************************/
110
111struct nv50_curs {
112 struct nv50_pioc base;
113};
114
115static int
116nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs)
117{
118 struct nv50_disp_cursor_v0 args = {
119 .head = head,
120 };
121 static const u32 oclass[] = {
122 GK104_DISP_CURSOR,
123 GF110_DISP_CURSOR,
124 GT214_DISP_CURSOR,
125 G82_DISP_CURSOR,
126 NV50_DISP_CURSOR,
127 0
128 };
129
130 return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
131 &curs->base);
132}
133
134/******************************************************************************
135 * Overlay Immediate
136 *****************************************************************************/
137
138struct nv50_oimm {
139 struct nv50_pioc base;
140};
141
142static int
143nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm)
144{
145 struct nv50_disp_cursor_v0 args = {
146 .head = head,
147 };
148 static const u32 oclass[] = {
149 GK104_DISP_OVERLAY,
150 GF110_DISP_OVERLAY,
151 GT214_DISP_OVERLAY,
152 G82_DISP_OVERLAY,
153 NV50_DISP_OVERLAY,
154 0
155 };
156
157 return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
158 &oimm->base);
123} 159}
124 160
125/****************************************************************************** 161/******************************************************************************
@@ -131,6 +167,9 @@ struct nv50_dmac {
131 dma_addr_t handle; 167 dma_addr_t handle;
132 u32 *ptr; 168 u32 *ptr;
133 169
170 struct nvif_object sync;
171 struct nvif_object vram;
172
134 /* Protects against concurrent pushbuf access to this channel, lock is 173 /* Protects against concurrent pushbuf access to this channel, lock is
135 * grabbed by evo_wait (if the pushbuf reservation is successful) and 174 * grabbed by evo_wait (if the pushbuf reservation is successful) and
136 * dropped again by evo_kick. */ 175 * dropped again by evo_kick. */
@@ -138,207 +177,113 @@ struct nv50_dmac {
138}; 177};
139 178
140static void 179static void
141nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac) 180nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
142{ 181{
182 nvif_object_fini(&dmac->vram);
183 nvif_object_fini(&dmac->sync);
184
185 nv50_chan_destroy(&dmac->base);
186
143 if (dmac->ptr) { 187 if (dmac->ptr) {
144 struct pci_dev *pdev = nv_device(core)->pdev; 188 struct pci_dev *pdev = nvkm_device(nvif_device(disp))->pdev;
145 pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle); 189 pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
146 } 190 }
147
148 nv50_chan_destroy(core, &dmac->base);
149}
150
151static int
152nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
153{
154 struct nouveau_fb *pfb = nouveau_fb(core);
155 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
156 struct nouveau_object *object;
157 int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
158 NV_DMA_IN_MEMORY_CLASS,
159 &(struct nv_dma_class) {
160 .flags = NV_DMA_TARGET_VRAM |
161 NV_DMA_ACCESS_RDWR,
162 .start = 0,
163 .limit = pfb->ram->size - 1,
164 .conf0 = NV50_DMA_CONF0_ENABLE |
165 NV50_DMA_CONF0_PART_256,
166 }, sizeof(struct nv_dma_class), &object);
167 if (ret)
168 return ret;
169
170 ret = nouveau_object_new(client, parent, NvEvoFB16,
171 NV_DMA_IN_MEMORY_CLASS,
172 &(struct nv_dma_class) {
173 .flags = NV_DMA_TARGET_VRAM |
174 NV_DMA_ACCESS_RDWR,
175 .start = 0,
176 .limit = pfb->ram->size - 1,
177 .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
178 NV50_DMA_CONF0_PART_256,
179 }, sizeof(struct nv_dma_class), &object);
180 if (ret)
181 return ret;
182
183 ret = nouveau_object_new(client, parent, NvEvoFB32,
184 NV_DMA_IN_MEMORY_CLASS,
185 &(struct nv_dma_class) {
186 .flags = NV_DMA_TARGET_VRAM |
187 NV_DMA_ACCESS_RDWR,
188 .start = 0,
189 .limit = pfb->ram->size - 1,
190 .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
191 NV50_DMA_CONF0_PART_256,
192 }, sizeof(struct nv_dma_class), &object);
193 return ret;
194}
195
196static int
197nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
198{
199 struct nouveau_fb *pfb = nouveau_fb(core);
200 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
201 struct nouveau_object *object;
202 int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
203 NV_DMA_IN_MEMORY_CLASS,
204 &(struct nv_dma_class) {
205 .flags = NV_DMA_TARGET_VRAM |
206 NV_DMA_ACCESS_RDWR,
207 .start = 0,
208 .limit = pfb->ram->size - 1,
209 .conf0 = NVC0_DMA_CONF0_ENABLE,
210 }, sizeof(struct nv_dma_class), &object);
211 if (ret)
212 return ret;
213
214 ret = nouveau_object_new(client, parent, NvEvoFB16,
215 NV_DMA_IN_MEMORY_CLASS,
216 &(struct nv_dma_class) {
217 .flags = NV_DMA_TARGET_VRAM |
218 NV_DMA_ACCESS_RDWR,
219 .start = 0,
220 .limit = pfb->ram->size - 1,
221 .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
222 }, sizeof(struct nv_dma_class), &object);
223 if (ret)
224 return ret;
225
226 ret = nouveau_object_new(client, parent, NvEvoFB32,
227 NV_DMA_IN_MEMORY_CLASS,
228 &(struct nv_dma_class) {
229 .flags = NV_DMA_TARGET_VRAM |
230 NV_DMA_ACCESS_RDWR,
231 .start = 0,
232 .limit = pfb->ram->size - 1,
233 .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
234 }, sizeof(struct nv_dma_class), &object);
235 return ret;
236}
237
238static int
239nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
240{
241 struct nouveau_fb *pfb = nouveau_fb(core);
242 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
243 struct nouveau_object *object;
244 int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
245 NV_DMA_IN_MEMORY_CLASS,
246 &(struct nv_dma_class) {
247 .flags = NV_DMA_TARGET_VRAM |
248 NV_DMA_ACCESS_RDWR,
249 .start = 0,
250 .limit = pfb->ram->size - 1,
251 .conf0 = NVD0_DMA_CONF0_ENABLE |
252 NVD0_DMA_CONF0_PAGE_LP,
253 }, sizeof(struct nv_dma_class), &object);
254 if (ret)
255 return ret;
256
257 ret = nouveau_object_new(client, parent, NvEvoFB32,
258 NV_DMA_IN_MEMORY_CLASS,
259 &(struct nv_dma_class) {
260 .flags = NV_DMA_TARGET_VRAM |
261 NV_DMA_ACCESS_RDWR,
262 .start = 0,
263 .limit = pfb->ram->size - 1,
264 .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
265 NVD0_DMA_CONF0_PAGE_LP,
266 }, sizeof(struct nv_dma_class), &object);
267 return ret;
268} 191}
269 192
270static int 193static int
271nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head, 194nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
272 void *data, u32 size, u64 syncbuf, 195 void *data, u32 size, u64 syncbuf,
273 struct nv50_dmac *dmac) 196 struct nv50_dmac *dmac)
274{ 197{
275 struct nouveau_fb *pfb = nouveau_fb(core); 198 struct nvif_device *device = nvif_device(disp);
276 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); 199 struct nv50_disp_core_channel_dma_v0 *args = data;
277 struct nouveau_object *object; 200 struct nvif_object pushbuf;
278 u32 pushbuf = *(u32 *)data;
279 int ret; 201 int ret;
280 202
281 mutex_init(&dmac->lock); 203 mutex_init(&dmac->lock);
282 204
283 dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE, 205 dmac->ptr = pci_alloc_consistent(nvkm_device(device)->pdev,
284 &dmac->handle); 206 PAGE_SIZE, &dmac->handle);
285 if (!dmac->ptr) 207 if (!dmac->ptr)
286 return -ENOMEM; 208 return -ENOMEM;
287 209
288 ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf, 210 ret = nvif_object_init(nvif_object(device), NULL,
289 NV_DMA_FROM_MEMORY_CLASS, 211 args->pushbuf, NV_DMA_FROM_MEMORY,
290 &(struct nv_dma_class) { 212 &(struct nv_dma_v0) {
291 .flags = NV_DMA_TARGET_PCI_US | 213 .target = NV_DMA_V0_TARGET_PCI_US,
292 NV_DMA_ACCESS_RD, 214 .access = NV_DMA_V0_ACCESS_RD,
293 .start = dmac->handle + 0x0000, 215 .start = dmac->handle + 0x0000,
294 .limit = dmac->handle + 0x0fff, 216 .limit = dmac->handle + 0x0fff,
295 }, sizeof(struct nv_dma_class), &object); 217 }, sizeof(struct nv_dma_v0), &pushbuf);
296 if (ret) 218 if (ret)
297 return ret; 219 return ret;
298 220
299 ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base); 221 ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base);
222 nvif_object_fini(&pushbuf);
300 if (ret) 223 if (ret)
301 return ret; 224 return ret;
302 225
303 ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync, 226 ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000,
304 NV_DMA_IN_MEMORY_CLASS, 227 NV_DMA_IN_MEMORY,
305 &(struct nv_dma_class) { 228 &(struct nv_dma_v0) {
306 .flags = NV_DMA_TARGET_VRAM | 229 .target = NV_DMA_V0_TARGET_VRAM,
307 NV_DMA_ACCESS_RDWR, 230 .access = NV_DMA_V0_ACCESS_RDWR,
308 .start = syncbuf + 0x0000, 231 .start = syncbuf + 0x0000,
309 .limit = syncbuf + 0x0fff, 232 .limit = syncbuf + 0x0fff,
310 }, sizeof(struct nv_dma_class), &object); 233 }, sizeof(struct nv_dma_v0),
234 &dmac->sync);
311 if (ret) 235 if (ret)
312 return ret; 236 return ret;
313 237
314 ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM, 238 ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001,
315 NV_DMA_IN_MEMORY_CLASS, 239 NV_DMA_IN_MEMORY,
316 &(struct nv_dma_class) { 240 &(struct nv_dma_v0) {
317 .flags = NV_DMA_TARGET_VRAM | 241 .target = NV_DMA_V0_TARGET_VRAM,
318 NV_DMA_ACCESS_RDWR, 242 .access = NV_DMA_V0_ACCESS_RDWR,
319 .start = 0, 243 .start = 0,
320 .limit = pfb->ram->size - 1, 244 .limit = device->info.ram_user - 1,
321 }, sizeof(struct nv_dma_class), &object); 245 }, sizeof(struct nv_dma_v0),
246 &dmac->vram);
322 if (ret) 247 if (ret)
323 return ret; 248 return ret;
324 249
325 if (nv_device(core)->card_type < NV_C0)
326 ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
327 else
328 if (nv_device(core)->card_type < NV_D0)
329 ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
330 else
331 ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
332 return ret; 250 return ret;
333} 251}
334 252
253/******************************************************************************
254 * Core
255 *****************************************************************************/
256
335struct nv50_mast { 257struct nv50_mast {
336 struct nv50_dmac base; 258 struct nv50_dmac base;
337}; 259};
338 260
339struct nv50_curs { 261static int
340 struct nv50_pioc base; 262nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
341}; 263{
264 struct nv50_disp_core_channel_dma_v0 args = {
265 .pushbuf = 0xb0007d00,
266 };
267 static const u32 oclass[] = {
268 GM107_DISP_CORE_CHANNEL_DMA,
269 GK110_DISP_CORE_CHANNEL_DMA,
270 GK104_DISP_CORE_CHANNEL_DMA,
271 GF110_DISP_CORE_CHANNEL_DMA,
272 GT214_DISP_CORE_CHANNEL_DMA,
273 GT206_DISP_CORE_CHANNEL_DMA,
274 GT200_DISP_CORE_CHANNEL_DMA,
275 G82_DISP_CORE_CHANNEL_DMA,
276 NV50_DISP_CORE_CHANNEL_DMA,
277 0
278 };
279
280 return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf,
281 &core->base);
282}
283
284/******************************************************************************
285 * Base
286 *****************************************************************************/
342 287
343struct nv50_sync { 288struct nv50_sync {
344 struct nv50_dmac base; 289 struct nv50_dmac base;
@@ -346,13 +291,58 @@ struct nv50_sync {
346 u32 data; 291 u32 data;
347}; 292};
348 293
294static int
295nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf,
296 struct nv50_sync *base)
297{
298 struct nv50_disp_base_channel_dma_v0 args = {
299 .pushbuf = 0xb0007c00 | head,
300 .head = head,
301 };
302 static const u32 oclass[] = {
303 GK110_DISP_BASE_CHANNEL_DMA,
304 GK104_DISP_BASE_CHANNEL_DMA,
305 GF110_DISP_BASE_CHANNEL_DMA,
306 GT214_DISP_BASE_CHANNEL_DMA,
307 GT200_DISP_BASE_CHANNEL_DMA,
308 G82_DISP_BASE_CHANNEL_DMA,
309 NV50_DISP_BASE_CHANNEL_DMA,
310 0
311 };
312
313 return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
314 syncbuf, &base->base);
315}
316
317/******************************************************************************
318 * Overlay
319 *****************************************************************************/
320
349struct nv50_ovly { 321struct nv50_ovly {
350 struct nv50_dmac base; 322 struct nv50_dmac base;
351}; 323};
352 324
353struct nv50_oimm { 325static int
354 struct nv50_pioc base; 326nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf,
355}; 327 struct nv50_ovly *ovly)
328{
329 struct nv50_disp_overlay_channel_dma_v0 args = {
330 .pushbuf = 0xb0007e00 | head,
331 .head = head,
332 };
333 static const u32 oclass[] = {
334 GK104_DISP_OVERLAY_CONTROL_DMA,
335 GF110_DISP_OVERLAY_CONTROL_DMA,
336 GT214_DISP_OVERLAY_CHANNEL_DMA,
337 GT200_DISP_OVERLAY_CHANNEL_DMA,
338 G82_DISP_OVERLAY_CHANNEL_DMA,
339 NV50_DISP_OVERLAY_CHANNEL_DMA,
340 0
341 };
342
343 return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
344 syncbuf, &ovly->base);
345}
356 346
357struct nv50_head { 347struct nv50_head {
358 struct nouveau_crtc base; 348 struct nouveau_crtc base;
@@ -369,13 +359,19 @@ struct nv50_head {
369#define nv50_ovly(c) (&nv50_head(c)->ovly) 359#define nv50_ovly(c) (&nv50_head(c)->ovly)
370#define nv50_oimm(c) (&nv50_head(c)->oimm) 360#define nv50_oimm(c) (&nv50_head(c)->oimm)
371#define nv50_chan(c) (&(c)->base.base) 361#define nv50_chan(c) (&(c)->base.base)
372#define nv50_vers(c) nv_mclass(nv50_chan(c)->user) 362#define nv50_vers(c) nv50_chan(c)->user.oclass
363
364struct nv50_fbdma {
365 struct list_head head;
366 struct nvif_object core;
367 struct nvif_object base[4];
368};
373 369
374struct nv50_disp { 370struct nv50_disp {
375 struct nouveau_object *core; 371 struct nvif_object *disp;
376 struct nv50_mast mast; 372 struct nv50_mast mast;
377 373
378 u32 modeset; 374 struct list_head fbdma;
379 375
380 struct nouveau_bo *sync; 376 struct nouveau_bo *sync;
381}; 377};
@@ -401,16 +397,16 @@ static u32 *
401evo_wait(void *evoc, int nr) 397evo_wait(void *evoc, int nr)
402{ 398{
403 struct nv50_dmac *dmac = evoc; 399 struct nv50_dmac *dmac = evoc;
404 u32 put = nv_ro32(dmac->base.user, 0x0000) / 4; 400 u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
405 401
406 mutex_lock(&dmac->lock); 402 mutex_lock(&dmac->lock);
407 if (put + nr >= (PAGE_SIZE / 4) - 8) { 403 if (put + nr >= (PAGE_SIZE / 4) - 8) {
408 dmac->ptr[put] = 0x20000000; 404 dmac->ptr[put] = 0x20000000;
409 405
410 nv_wo32(dmac->base.user, 0x0000, 0x00000000); 406 nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
411 if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) { 407 if (!nvkm_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) {
412 mutex_unlock(&dmac->lock); 408 mutex_unlock(&dmac->lock);
413 NV_ERROR(dmac->base.user, "channel stalled\n"); 409 nv_error(nvkm_object(&dmac->base.user), "channel stalled\n");
414 return NULL; 410 return NULL;
415 } 411 }
416 412
@@ -424,7 +420,7 @@ static void
424evo_kick(u32 *push, void *evoc) 420evo_kick(u32 *push, void *evoc)
425{ 421{
426 struct nv50_dmac *dmac = evoc; 422 struct nv50_dmac *dmac = evoc;
427 nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2); 423 nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
428 mutex_unlock(&dmac->lock); 424 mutex_unlock(&dmac->lock);
429} 425}
430 426
@@ -443,7 +439,7 @@ evo_sync_wait(void *data)
443static int 439static int
444evo_sync(struct drm_device *dev) 440evo_sync(struct drm_device *dev)
445{ 441{
446 struct nouveau_device *device = nouveau_dev(dev); 442 struct nvif_device *device = &nouveau_drm(dev)->device;
447 struct nv50_disp *disp = nv50_disp(dev); 443 struct nv50_disp *disp = nv50_disp(dev);
448 struct nv50_mast *mast = nv50_mast(dev); 444 struct nv50_mast *mast = nv50_mast(dev);
449 u32 *push = evo_wait(mast, 8); 445 u32 *push = evo_wait(mast, 8);
@@ -455,7 +451,7 @@ evo_sync(struct drm_device *dev)
455 evo_data(push, 0x00000000); 451 evo_data(push, 0x00000000);
456 evo_data(push, 0x00000000); 452 evo_data(push, 0x00000000);
457 evo_kick(push, mast); 453 evo_kick(push, mast);
458 if (nv_wait_cb(device, evo_sync_wait, disp->sync)) 454 if (nv_wait_cb(nvkm_device(device), evo_sync_wait, disp->sync))
459 return 0; 455 return 0;
460 } 456 }
461 457
@@ -490,7 +486,7 @@ nv50_display_flip_wait(void *data)
490void 486void
491nv50_display_flip_stop(struct drm_crtc *crtc) 487nv50_display_flip_stop(struct drm_crtc *crtc)
492{ 488{
493 struct nouveau_device *device = nouveau_dev(crtc->dev); 489 struct nvif_device *device = &nouveau_drm(crtc->dev)->device;
494 struct nv50_display_flip flip = { 490 struct nv50_display_flip flip = {
495 .disp = nv50_disp(crtc->dev), 491 .disp = nv50_disp(crtc->dev),
496 .chan = nv50_sync(crtc), 492 .chan = nv50_sync(crtc),
@@ -510,7 +506,7 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
510 evo_kick(push, flip.chan); 506 evo_kick(push, flip.chan);
511 } 507 }
512 508
513 nv_wait_cb(device, nv50_display_flip_wait, &flip); 509 nv_wait_cb(nvkm_device(device), nv50_display_flip_wait, &flip);
514} 510}
515 511
516int 512int
@@ -534,7 +530,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
534 if (unlikely(push == NULL)) 530 if (unlikely(push == NULL))
535 return -EBUSY; 531 return -EBUSY;
536 532
537 if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) { 533 if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) {
538 ret = RING_SPACE(chan, 8); 534 ret = RING_SPACE(chan, 8);
539 if (ret) 535 if (ret)
540 return ret; 536 return ret;
@@ -548,14 +544,14 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
548 OUT_RING (chan, sync->addr); 544 OUT_RING (chan, sync->addr);
549 OUT_RING (chan, sync->data); 545 OUT_RING (chan, sync->data);
550 } else 546 } else
551 if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { 547 if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) {
552 u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; 548 u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
553 ret = RING_SPACE(chan, 12); 549 ret = RING_SPACE(chan, 12);
554 if (ret) 550 if (ret)
555 return ret; 551 return ret;
556 552
557 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 553 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
558 OUT_RING (chan, chan->vram); 554 OUT_RING (chan, chan->vram.handle);
559 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 555 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
560 OUT_RING (chan, upper_32_bits(addr ^ 0x10)); 556 OUT_RING (chan, upper_32_bits(addr ^ 0x10));
561 OUT_RING (chan, lower_32_bits(addr ^ 0x10)); 557 OUT_RING (chan, lower_32_bits(addr ^ 0x10));
@@ -606,16 +602,16 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
606 evo_data(push, sync->addr); 602 evo_data(push, sync->addr);
607 evo_data(push, sync->data++); 603 evo_data(push, sync->data++);
608 evo_data(push, sync->data); 604 evo_data(push, sync->data);
609 evo_data(push, NvEvoSync); 605 evo_data(push, sync->base.sync.handle);
610 evo_mthd(push, 0x00a0, 2); 606 evo_mthd(push, 0x00a0, 2);
611 evo_data(push, 0x00000000); 607 evo_data(push, 0x00000000);
612 evo_data(push, 0x00000000); 608 evo_data(push, 0x00000000);
613 evo_mthd(push, 0x00c0, 1); 609 evo_mthd(push, 0x00c0, 1);
614 evo_data(push, nv_fb->r_dma); 610 evo_data(push, nv_fb->r_handle);
615 evo_mthd(push, 0x0110, 2); 611 evo_mthd(push, 0x0110, 2);
616 evo_data(push, 0x00000000); 612 evo_data(push, 0x00000000);
617 evo_data(push, 0x00000000); 613 evo_data(push, 0x00000000);
618 if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) { 614 if (nv50_vers(sync) < GF110_DISP_BASE_CHANNEL_DMA) {
619 evo_mthd(push, 0x0800, 5); 615 evo_mthd(push, 0x0800, 5);
620 evo_data(push, nv_fb->nvbo->bo.offset >> 8); 616 evo_data(push, nv_fb->nvbo->bo.offset >> 8);
621 evo_data(push, 0); 617 evo_data(push, 0);
@@ -667,11 +663,11 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
667 663
668 push = evo_wait(mast, 4); 664 push = evo_wait(mast, 4);
669 if (push) { 665 if (push) {
670 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 666 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
671 evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1); 667 evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
672 evo_data(push, mode); 668 evo_data(push, mode);
673 } else 669 } else
674 if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) { 670 if (nv50_vers(mast) < GK104_DISP_CORE_CHANNEL_DMA) {
675 evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1); 671 evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
676 evo_data(push, mode); 672 evo_data(push, mode);
677 } else { 673 } else {
@@ -762,7 +758,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
762 758
763 push = evo_wait(mast, 8); 759 push = evo_wait(mast, 8);
764 if (push) { 760 if (push) {
765 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 761 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
766 /*XXX: SCALE_CTRL_ACTIVE??? */ 762 /*XXX: SCALE_CTRL_ACTIVE??? */
767 evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2); 763 evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
768 evo_data(push, (oY << 16) | oX); 764 evo_data(push, (oY << 16) | oX);
@@ -807,7 +803,7 @@ nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
807 803
808 push = evo_wait(mast, 16); 804 push = evo_wait(mast, 16);
809 if (push) { 805 if (push) {
810 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 806 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
811 evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1); 807 evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
812 evo_data(push, (hue << 20) | (vib << 8)); 808 evo_data(push, (hue << 20) | (vib << 8));
813 } else { 809 } else {
@@ -835,7 +831,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
835 831
836 push = evo_wait(mast, 16); 832 push = evo_wait(mast, 16);
837 if (push) { 833 if (push) {
838 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 834 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
839 evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1); 835 evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
840 evo_data(push, nvfb->nvbo->bo.offset >> 8); 836 evo_data(push, nvfb->nvbo->bo.offset >> 8);
841 evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3); 837 evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
@@ -844,9 +840,9 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
844 evo_data(push, nvfb->r_format); 840 evo_data(push, nvfb->r_format);
845 evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1); 841 evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
846 evo_data(push, (y << 16) | x); 842 evo_data(push, (y << 16) | x);
847 if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) { 843 if (nv50_vers(mast) > NV50_DISP_CORE_CHANNEL_DMA) {
848 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); 844 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
849 evo_data(push, nvfb->r_dma); 845 evo_data(push, nvfb->r_handle);
850 } 846 }
851 } else { 847 } else {
852 evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); 848 evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
@@ -855,7 +851,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
855 evo_data(push, (fb->height << 16) | fb->width); 851 evo_data(push, (fb->height << 16) | fb->width);
856 evo_data(push, nvfb->r_pitch); 852 evo_data(push, nvfb->r_pitch);
857 evo_data(push, nvfb->r_format); 853 evo_data(push, nvfb->r_format);
858 evo_data(push, nvfb->r_dma); 854 evo_data(push, nvfb->r_handle);
859 evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); 855 evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
860 evo_data(push, (y << 16) | x); 856 evo_data(push, (y << 16) | x);
861 } 857 }
@@ -867,7 +863,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
867 evo_kick(push, mast); 863 evo_kick(push, mast);
868 } 864 }
869 865
870 nv_crtc->fb.tile_flags = nvfb->r_dma; 866 nv_crtc->fb.handle = nvfb->r_handle;
871 return 0; 867 return 0;
872} 868}
873 869
@@ -877,23 +873,23 @@ nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
877 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); 873 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
878 u32 *push = evo_wait(mast, 16); 874 u32 *push = evo_wait(mast, 16);
879 if (push) { 875 if (push) {
880 if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { 876 if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
881 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); 877 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
882 evo_data(push, 0x85000000); 878 evo_data(push, 0x85000000);
883 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); 879 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
884 } else 880 } else
885 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 881 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
886 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); 882 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
887 evo_data(push, 0x85000000); 883 evo_data(push, 0x85000000);
888 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); 884 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
889 evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); 885 evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
890 evo_data(push, NvEvoVRAM); 886 evo_data(push, mast->base.vram.handle);
891 } else { 887 } else {
892 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); 888 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
893 evo_data(push, 0x85000000); 889 evo_data(push, 0x85000000);
894 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); 890 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
895 evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); 891 evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
896 evo_data(push, NvEvoVRAM); 892 evo_data(push, mast->base.vram.handle);
897 } 893 }
898 evo_kick(push, mast); 894 evo_kick(push, mast);
899 } 895 }
@@ -905,11 +901,11 @@ nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
905 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); 901 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
906 u32 *push = evo_wait(mast, 16); 902 u32 *push = evo_wait(mast, 16);
907 if (push) { 903 if (push) {
908 if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { 904 if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
909 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); 905 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
910 evo_data(push, 0x05000000); 906 evo_data(push, 0x05000000);
911 } else 907 } else
912 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 908 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
913 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); 909 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
914 evo_data(push, 0x05000000); 910 evo_data(push, 0x05000000);
915 evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); 911 evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
@@ -960,13 +956,13 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
960 956
961 push = evo_wait(mast, 6); 957 push = evo_wait(mast, 6);
962 if (push) { 958 if (push) {
963 if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { 959 if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
964 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); 960 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
965 evo_data(push, 0x00000000); 961 evo_data(push, 0x00000000);
966 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); 962 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
967 evo_data(push, 0x40000000); 963 evo_data(push, 0x40000000);
968 } else 964 } else
969 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 965 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
970 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); 966 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
971 evo_data(push, 0x00000000); 967 evo_data(push, 0x00000000);
972 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); 968 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
@@ -997,31 +993,31 @@ nv50_crtc_commit(struct drm_crtc *crtc)
997 993
998 push = evo_wait(mast, 32); 994 push = evo_wait(mast, 32);
999 if (push) { 995 if (push) {
1000 if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { 996 if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
1001 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); 997 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
1002 evo_data(push, NvEvoVRAM_LP); 998 evo_data(push, nv_crtc->fb.handle);
1003 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); 999 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
1004 evo_data(push, 0xc0000000); 1000 evo_data(push, 0xc0000000);
1005 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); 1001 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
1006 } else 1002 } else
1007 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 1003 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
1008 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); 1004 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
1009 evo_data(push, nv_crtc->fb.tile_flags); 1005 evo_data(push, nv_crtc->fb.handle);
1010 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); 1006 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
1011 evo_data(push, 0xc0000000); 1007 evo_data(push, 0xc0000000);
1012 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); 1008 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
1013 evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1); 1009 evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
1014 evo_data(push, NvEvoVRAM); 1010 evo_data(push, mast->base.vram.handle);
1015 } else { 1011 } else {
1016 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); 1012 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
1017 evo_data(push, nv_crtc->fb.tile_flags); 1013 evo_data(push, nv_crtc->fb.handle);
1018 evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4); 1014 evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
1019 evo_data(push, 0x83000000); 1015 evo_data(push, 0x83000000);
1020 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); 1016 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
1021 evo_data(push, 0x00000000); 1017 evo_data(push, 0x00000000);
1022 evo_data(push, 0x00000000); 1018 evo_data(push, 0x00000000);
1023 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); 1019 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
1024 evo_data(push, NvEvoVRAM); 1020 evo_data(push, mast->base.vram.handle);
1025 evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); 1021 evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
1026 evo_data(push, 0xffffff00); 1022 evo_data(push, 0xffffff00);
1027 } 1023 }
@@ -1099,7 +1095,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
1099 1095
1100 push = evo_wait(mast, 64); 1096 push = evo_wait(mast, 64);
1101 if (push) { 1097 if (push) {
1102 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 1098 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
1103 evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); 1099 evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
1104 evo_data(push, 0x00800000 | mode->clock); 1100 evo_data(push, 0x00800000 | mode->clock);
1105 evo_data(push, (ilace == 2) ? 2 : 0); 1101 evo_data(push, (ilace == 2) ? 2 : 0);
@@ -1192,7 +1188,7 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
1192 u16 g = nv_crtc->lut.g[i] >> 2; 1188 u16 g = nv_crtc->lut.g[i] >> 2;
1193 u16 b = nv_crtc->lut.b[i] >> 2; 1189 u16 b = nv_crtc->lut.b[i] >> 2;
1194 1190
1195 if (nv_mclass(disp->core) < NVD0_DISP_CLASS) { 1191 if (disp->disp->oclass < GF110_DISP) {
1196 writew(r + 0x0000, lut + (i * 0x08) + 0); 1192 writew(r + 0x0000, lut + (i * 0x08) + 0);
1197 writew(g + 0x0000, lut + (i * 0x08) + 2); 1193 writew(g + 0x0000, lut + (i * 0x08) + 2);
1198 writew(b + 0x0000, lut + (i * 0x08) + 4); 1194 writew(b + 0x0000, lut + (i * 0x08) + 4);
@@ -1259,8 +1255,8 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1259{ 1255{
1260 struct nv50_curs *curs = nv50_curs(crtc); 1256 struct nv50_curs *curs = nv50_curs(crtc);
1261 struct nv50_chan *chan = nv50_chan(curs); 1257 struct nv50_chan *chan = nv50_chan(curs);
1262 nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff)); 1258 nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff));
1263 nv_wo32(chan->user, 0x0080, 0x00000000); 1259 nvif_wr32(&chan->user, 0x0080, 0x00000000);
1264 return 0; 1260 return 0;
1265} 1261}
1266 1262
@@ -1287,11 +1283,16 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
1287 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 1283 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1288 struct nv50_disp *disp = nv50_disp(crtc->dev); 1284 struct nv50_disp *disp = nv50_disp(crtc->dev);
1289 struct nv50_head *head = nv50_head(crtc); 1285 struct nv50_head *head = nv50_head(crtc);
1286 struct nv50_fbdma *fbdma;
1287
1288 list_for_each_entry(fbdma, &disp->fbdma, head) {
1289 nvif_object_fini(&fbdma->base[nv_crtc->index]);
1290 }
1290 1291
1291 nv50_dmac_destroy(disp->core, &head->ovly.base); 1292 nv50_dmac_destroy(&head->ovly.base, disp->disp);
1292 nv50_pioc_destroy(disp->core, &head->oimm.base); 1293 nv50_pioc_destroy(&head->oimm.base);
1293 nv50_dmac_destroy(disp->core, &head->sync.base); 1294 nv50_dmac_destroy(&head->sync.base, disp->disp);
1294 nv50_pioc_destroy(disp->core, &head->curs.base); 1295 nv50_pioc_destroy(&head->curs.base);
1295 1296
1296 /*XXX: this shouldn't be necessary, but the core doesn't call 1297 /*XXX: this shouldn't be necessary, but the core doesn't call
1297 * disconnect() during the cleanup paths 1298 * disconnect() during the cleanup paths
@@ -1346,7 +1347,7 @@ nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
1346} 1347}
1347 1348
1348static int 1349static int
1349nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) 1350nv50_crtc_create(struct drm_device *dev, int index)
1350{ 1351{
1351 struct nv50_disp *disp = nv50_disp(dev); 1352 struct nv50_disp *disp = nv50_disp(dev);
1352 struct nv50_head *head; 1353 struct nv50_head *head;
@@ -1395,11 +1396,7 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
1395 nv50_crtc_lut_load(crtc); 1396 nv50_crtc_lut_load(crtc);
1396 1397
1397 /* allocate cursor resources */ 1398 /* allocate cursor resources */
1398 ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index, 1399 ret = nv50_curs_create(disp->disp, index, &head->curs);
1399 &(struct nv50_display_curs_class) {
1400 .head = index,
1401 }, sizeof(struct nv50_display_curs_class),
1402 &head->curs.base);
1403 if (ret) 1400 if (ret)
1404 goto out; 1401 goto out;
1405 1402
@@ -1420,12 +1417,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
1420 goto out; 1417 goto out;
1421 1418
1422 /* allocate page flip / sync resources */ 1419 /* allocate page flip / sync resources */
1423 ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index, 1420 ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset,
1424 &(struct nv50_display_sync_class) { 1421 &head->sync);
1425 .pushbuf = EVO_PUSH_HANDLE(SYNC, index),
1426 .head = index,
1427 }, sizeof(struct nv50_display_sync_class),
1428 disp->sync->bo.offset, &head->sync.base);
1429 if (ret) 1422 if (ret)
1430 goto out; 1423 goto out;
1431 1424
@@ -1433,20 +1426,12 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
1433 head->sync.data = 0x00000000; 1426 head->sync.data = 0x00000000;
1434 1427
1435 /* allocate overlay resources */ 1428 /* allocate overlay resources */
1436 ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index, 1429 ret = nv50_oimm_create(disp->disp, index, &head->oimm);
1437 &(struct nv50_display_oimm_class) {
1438 .head = index,
1439 }, sizeof(struct nv50_display_oimm_class),
1440 &head->oimm.base);
1441 if (ret) 1430 if (ret)
1442 goto out; 1431 goto out;
1443 1432
1444 ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index, 1433 ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset,
1445 &(struct nv50_display_ovly_class) { 1434 &head->ovly);
1446 .pushbuf = EVO_PUSH_HANDLE(OVLY, index),
1447 .head = index,
1448 }, sizeof(struct nv50_display_ovly_class),
1449 disp->sync->bo.offset, &head->ovly.base);
1450 if (ret) 1435 if (ret)
1451 goto out; 1436 goto out;
1452 1437
@@ -1464,16 +1449,23 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
1464{ 1449{
1465 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1450 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1466 struct nv50_disp *disp = nv50_disp(encoder->dev); 1451 struct nv50_disp *disp = nv50_disp(encoder->dev);
1467 int or = nv_encoder->or; 1452 struct {
1468 u32 dpms_ctrl; 1453 struct nv50_disp_mthd_v1 base;
1469 1454 struct nv50_disp_dac_pwr_v0 pwr;
1470 dpms_ctrl = 0x00000000; 1455 } args = {
1471 if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF) 1456 .base.version = 1,
1472 dpms_ctrl |= 0x00000001; 1457 .base.method = NV50_DISP_MTHD_V1_DAC_PWR,
1473 if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF) 1458 .base.hasht = nv_encoder->dcb->hasht,
1474 dpms_ctrl |= 0x00000004; 1459 .base.hashm = nv_encoder->dcb->hashm,
1460 .pwr.state = 1,
1461 .pwr.data = 1,
1462 .pwr.vsync = (mode != DRM_MODE_DPMS_SUSPEND &&
1463 mode != DRM_MODE_DPMS_OFF),
1464 .pwr.hsync = (mode != DRM_MODE_DPMS_STANDBY &&
1465 mode != DRM_MODE_DPMS_OFF),
1466 };
1475 1467
1476 nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl); 1468 nvif_mthd(disp->disp, 0, &args, sizeof(args));
1477} 1469}
1478 1470
1479static bool 1471static bool
@@ -1514,7 +1506,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1514 1506
1515 push = evo_wait(mast, 8); 1507 push = evo_wait(mast, 8);
1516 if (push) { 1508 if (push) {
1517 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 1509 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
1518 u32 syncs = 0x00000000; 1510 u32 syncs = 0x00000000;
1519 1511
1520 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 1512 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -1563,7 +1555,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
1563 1555
1564 push = evo_wait(mast, 4); 1556 push = evo_wait(mast, 4);
1565 if (push) { 1557 if (push) {
1566 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 1558 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
1567 evo_mthd(push, 0x0400 + (or * 0x080), 1); 1559 evo_mthd(push, 0x0400 + (or * 0x080), 1);
1568 evo_data(push, 0x00000000); 1560 evo_data(push, 0x00000000);
1569 } else { 1561 } else {
@@ -1580,14 +1572,25 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
1580static enum drm_connector_status 1572static enum drm_connector_status
1581nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) 1573nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1582{ 1574{
1575 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1583 struct nv50_disp *disp = nv50_disp(encoder->dev); 1576 struct nv50_disp *disp = nv50_disp(encoder->dev);
1584 int ret, or = nouveau_encoder(encoder)->or; 1577 struct {
1585 u32 load = nouveau_drm(encoder->dev)->vbios.dactestval; 1578 struct nv50_disp_mthd_v1 base;
1586 if (load == 0) 1579 struct nv50_disp_dac_load_v0 load;
1587 load = 340; 1580 } args = {
1581 .base.version = 1,
1582 .base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
1583 .base.hasht = nv_encoder->dcb->hasht,
1584 .base.hashm = nv_encoder->dcb->hashm,
1585 };
1586 int ret;
1587
1588 args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
1589 if (args.load.data == 0)
1590 args.load.data = 340;
1588 1591
1589 ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); 1592 ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
1590 if (ret || !load) 1593 if (ret || !args.load.load)
1591 return connector_status_disconnected; 1594 return connector_status_disconnected;
1592 1595
1593 return connector_status_connected; 1596 return connector_status_connected;
@@ -1619,7 +1622,7 @@ static int
1619nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) 1622nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
1620{ 1623{
1621 struct nouveau_drm *drm = nouveau_drm(connector->dev); 1624 struct nouveau_drm *drm = nouveau_drm(connector->dev);
1622 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 1625 struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
1623 struct nouveau_encoder *nv_encoder; 1626 struct nouveau_encoder *nv_encoder;
1624 struct drm_encoder *encoder; 1627 struct drm_encoder *encoder;
1625 int type = DRM_MODE_ENCODER_DAC; 1628 int type = DRM_MODE_ENCODER_DAC;
@@ -1650,16 +1653,25 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1650 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1653 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1651 struct nouveau_connector *nv_connector; 1654 struct nouveau_connector *nv_connector;
1652 struct nv50_disp *disp = nv50_disp(encoder->dev); 1655 struct nv50_disp *disp = nv50_disp(encoder->dev);
1656 struct {
1657 struct nv50_disp_mthd_v1 base;
1658 struct nv50_disp_sor_hda_eld_v0 eld;
1659 u8 data[sizeof(nv_connector->base.eld)];
1660 } args = {
1661 .base.version = 1,
1662 .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
1663 .base.hasht = nv_encoder->dcb->hasht,
1664 .base.hashm = nv_encoder->dcb->hashm,
1665 };
1653 1666
1654 nv_connector = nouveau_encoder_connector_get(nv_encoder); 1667 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1655 if (!drm_detect_monitor_audio(nv_connector->edid)) 1668 if (!drm_detect_monitor_audio(nv_connector->edid))
1656 return; 1669 return;
1657 1670
1658 drm_edid_to_eld(&nv_connector->base, nv_connector->edid); 1671 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
1672 memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
1659 1673
1660 nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, 1674 nvif_mthd(disp->disp, 0, &args, sizeof(args));
1661 nv_connector->base.eld,
1662 nv_connector->base.eld[2] * 4);
1663} 1675}
1664 1676
1665static void 1677static void
@@ -1667,8 +1679,17 @@ nv50_audio_disconnect(struct drm_encoder *encoder)
1667{ 1679{
1668 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1680 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1669 struct nv50_disp *disp = nv50_disp(encoder->dev); 1681 struct nv50_disp *disp = nv50_disp(encoder->dev);
1682 struct {
1683 struct nv50_disp_mthd_v1 base;
1684 struct nv50_disp_sor_hda_eld_v0 eld;
1685 } args = {
1686 .base.version = 1,
1687 .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
1688 .base.hasht = nv_encoder->dcb->hasht,
1689 .base.hashm = nv_encoder->dcb->hashm,
1690 };
1670 1691
1671 nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0); 1692 nvif_mthd(disp->disp, 0, &args, sizeof(args));
1672} 1693}
1673 1694
1674/****************************************************************************** 1695/******************************************************************************
@@ -1679,10 +1700,20 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1679{ 1700{
1680 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1701 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1681 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 1702 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1682 struct nouveau_connector *nv_connector;
1683 struct nv50_disp *disp = nv50_disp(encoder->dev); 1703 struct nv50_disp *disp = nv50_disp(encoder->dev);
1684 const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; 1704 struct {
1685 u32 rekey = 56; /* binary driver, and tegra constant */ 1705 struct nv50_disp_mthd_v1 base;
1706 struct nv50_disp_sor_hdmi_pwr_v0 pwr;
1707 } args = {
1708 .base.version = 1,
1709 .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
1710 .base.hasht = nv_encoder->dcb->hasht,
1711 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
1712 (0x0100 << nv_crtc->index),
1713 .pwr.state = 1,
1714 .pwr.rekey = 56, /* binary driver, and tegra, constant */
1715 };
1716 struct nouveau_connector *nv_connector;
1686 u32 max_ac_packet; 1717 u32 max_ac_packet;
1687 1718
1688 nv_connector = nouveau_encoder_connector_get(nv_encoder); 1719 nv_connector = nouveau_encoder_connector_get(nv_encoder);
@@ -1690,14 +1721,11 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1690 return; 1721 return;
1691 1722
1692 max_ac_packet = mode->htotal - mode->hdisplay; 1723 max_ac_packet = mode->htotal - mode->hdisplay;
1693 max_ac_packet -= rekey; 1724 max_ac_packet -= args.pwr.rekey;
1694 max_ac_packet -= 18; /* constant from tegra */ 1725 max_ac_packet -= 18; /* constant from tegra */
1695 max_ac_packet /= 32; 1726 args.pwr.max_ac_packet = max_ac_packet / 32;
1696
1697 nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
1698 NV84_DISP_SOR_HDMI_PWR_STATE_ON |
1699 (max_ac_packet << 16) | rekey);
1700 1727
1728 nvif_mthd(disp->disp, 0, &args, sizeof(args));
1701 nv50_audio_mode_set(encoder, mode); 1729 nv50_audio_mode_set(encoder, mode);
1702} 1730}
1703 1731
@@ -1706,11 +1734,20 @@ nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
1706{ 1734{
1707 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1735 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1708 struct nv50_disp *disp = nv50_disp(encoder->dev); 1736 struct nv50_disp *disp = nv50_disp(encoder->dev);
1709 const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; 1737 struct {
1738 struct nv50_disp_mthd_v1 base;
1739 struct nv50_disp_sor_hdmi_pwr_v0 pwr;
1740 } args = {
1741 .base.version = 1,
1742 .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
1743 .base.hasht = nv_encoder->dcb->hasht,
1744 .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
1745 (0x0100 << nv_crtc->index),
1746 };
1710 1747
1711 nv50_audio_disconnect(encoder); 1748 nv50_audio_disconnect(encoder);
1712 1749
1713 nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000); 1750 nvif_mthd(disp->disp, 0, &args, sizeof(args));
1714} 1751}
1715 1752
1716/****************************************************************************** 1753/******************************************************************************
@@ -1720,10 +1757,29 @@ static void
1720nv50_sor_dpms(struct drm_encoder *encoder, int mode) 1757nv50_sor_dpms(struct drm_encoder *encoder, int mode)
1721{ 1758{
1722 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1759 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1760 struct nv50_disp *disp = nv50_disp(encoder->dev);
1761 struct {
1762 struct nv50_disp_mthd_v1 base;
1763 struct nv50_disp_sor_pwr_v0 pwr;
1764 } args = {
1765 .base.version = 1,
1766 .base.method = NV50_DISP_MTHD_V1_SOR_PWR,
1767 .base.hasht = nv_encoder->dcb->hasht,
1768 .base.hashm = nv_encoder->dcb->hashm,
1769 .pwr.state = mode == DRM_MODE_DPMS_ON,
1770 };
1771 struct {
1772 struct nv50_disp_mthd_v1 base;
1773 struct nv50_disp_sor_dp_pwr_v0 pwr;
1774 } link = {
1775 .base.version = 1,
1776 .base.method = NV50_DISP_MTHD_V1_SOR_DP_PWR,
1777 .base.hasht = nv_encoder->dcb->hasht,
1778 .base.hashm = nv_encoder->dcb->hashm,
1779 .pwr.state = mode == DRM_MODE_DPMS_ON,
1780 };
1723 struct drm_device *dev = encoder->dev; 1781 struct drm_device *dev = encoder->dev;
1724 struct nv50_disp *disp = nv50_disp(dev);
1725 struct drm_encoder *partner; 1782 struct drm_encoder *partner;
1726 u32 mthd;
1727 1783
1728 nv_encoder->last_dpms = mode; 1784 nv_encoder->last_dpms = mode;
1729 1785
@@ -1741,18 +1797,13 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
1741 } 1797 }
1742 } 1798 }
1743 1799
1744 mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3;
1745 mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
1746 mthd |= nv_encoder->or;
1747
1748 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { 1800 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
1749 nv_call(disp->core, NV50_DISP_SOR_PWR | mthd, 1); 1801 args.pwr.state = 1;
1750 mthd |= NV94_DISP_SOR_DP_PWR; 1802 nvif_mthd(disp->disp, 0, &args, sizeof(args));
1803 nvif_mthd(disp->disp, 0, &link, sizeof(link));
1751 } else { 1804 } else {
1752 mthd |= NV50_DISP_SOR_PWR; 1805 nvif_mthd(disp->disp, 0, &args, sizeof(args));
1753 } 1806 }
1754
1755 nv_call(disp->core, mthd, (mode == DRM_MODE_DPMS_ON));
1756} 1807}
1757 1808
1758static bool 1809static bool
@@ -1781,7 +1832,7 @@ nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
1781 struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev); 1832 struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev);
1782 u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push; 1833 u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push;
1783 if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) { 1834 if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) {
1784 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 1835 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
1785 evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1); 1836 evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
1786 evo_data(push, (nv_encoder->ctrl = temp)); 1837 evo_data(push, (nv_encoder->ctrl = temp));
1787 } else { 1838 } else {
@@ -1817,15 +1868,24 @@ static void
1817nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, 1868nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1818 struct drm_display_mode *mode) 1869 struct drm_display_mode *mode)
1819{ 1870{
1871 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1872 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1873 struct {
1874 struct nv50_disp_mthd_v1 base;
1875 struct nv50_disp_sor_lvds_script_v0 lvds;
1876 } lvds = {
1877 .base.version = 1,
1878 .base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
1879 .base.hasht = nv_encoder->dcb->hasht,
1880 .base.hashm = nv_encoder->dcb->hashm,
1881 };
1820 struct nv50_disp *disp = nv50_disp(encoder->dev); 1882 struct nv50_disp *disp = nv50_disp(encoder->dev);
1821 struct nv50_mast *mast = nv50_mast(encoder->dev); 1883 struct nv50_mast *mast = nv50_mast(encoder->dev);
1822 struct drm_device *dev = encoder->dev; 1884 struct drm_device *dev = encoder->dev;
1823 struct nouveau_drm *drm = nouveau_drm(dev); 1885 struct nouveau_drm *drm = nouveau_drm(dev);
1824 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1825 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1826 struct nouveau_connector *nv_connector; 1886 struct nouveau_connector *nv_connector;
1827 struct nvbios *bios = &drm->vbios; 1887 struct nvbios *bios = &drm->vbios;
1828 u32 lvds = 0, mask, ctrl; 1888 u32 mask, ctrl;
1829 u8 owner = 1 << nv_crtc->index; 1889 u8 owner = 1 << nv_crtc->index;
1830 u8 proto = 0xf; 1890 u8 proto = 0xf;
1831 u8 depth = 0x0; 1891 u8 depth = 0x0;
@@ -1851,31 +1911,31 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1851 1911
1852 if (bios->fp_no_ddc) { 1912 if (bios->fp_no_ddc) {
1853 if (bios->fp.dual_link) 1913 if (bios->fp.dual_link)
1854 lvds |= 0x0100; 1914 lvds.lvds.script |= 0x0100;
1855 if (bios->fp.if_is_24bit) 1915 if (bios->fp.if_is_24bit)
1856 lvds |= 0x0200; 1916 lvds.lvds.script |= 0x0200;
1857 } else { 1917 } else {
1858 if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { 1918 if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
1859 if (((u8 *)nv_connector->edid)[121] == 2) 1919 if (((u8 *)nv_connector->edid)[121] == 2)
1860 lvds |= 0x0100; 1920 lvds.lvds.script |= 0x0100;
1861 } else 1921 } else
1862 if (mode->clock >= bios->fp.duallink_transition_clk) { 1922 if (mode->clock >= bios->fp.duallink_transition_clk) {
1863 lvds |= 0x0100; 1923 lvds.lvds.script |= 0x0100;
1864 } 1924 }
1865 1925
1866 if (lvds & 0x0100) { 1926 if (lvds.lvds.script & 0x0100) {
1867 if (bios->fp.strapless_is_24bit & 2) 1927 if (bios->fp.strapless_is_24bit & 2)
1868 lvds |= 0x0200; 1928 lvds.lvds.script |= 0x0200;
1869 } else { 1929 } else {
1870 if (bios->fp.strapless_is_24bit & 1) 1930 if (bios->fp.strapless_is_24bit & 1)
1871 lvds |= 0x0200; 1931 lvds.lvds.script |= 0x0200;
1872 } 1932 }
1873 1933
1874 if (nv_connector->base.display_info.bpc == 8) 1934 if (nv_connector->base.display_info.bpc == 8)
1875 lvds |= 0x0200; 1935 lvds.lvds.script |= 0x0200;
1876 } 1936 }
1877 1937
1878 nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds); 1938 nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
1879 break; 1939 break;
1880 case DCB_OUTPUT_DP: 1940 case DCB_OUTPUT_DP:
1881 if (nv_connector->base.display_info.bpc == 6) { 1941 if (nv_connector->base.display_info.bpc == 6) {
@@ -1902,7 +1962,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1902 1962
1903 nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON); 1963 nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON);
1904 1964
1905 if (nv50_vers(mast) >= NVD0_DISP_CLASS) { 1965 if (nv50_vers(mast) >= GF110_DISP) {
1906 u32 *push = evo_wait(mast, 3); 1966 u32 *push = evo_wait(mast, 3);
1907 if (push) { 1967 if (push) {
1908 u32 magic = 0x31ec6000 | (nv_crtc->index << 25); 1968 u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
@@ -1961,7 +2021,7 @@ static int
1961nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) 2021nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
1962{ 2022{
1963 struct nouveau_drm *drm = nouveau_drm(connector->dev); 2023 struct nouveau_drm *drm = nouveau_drm(connector->dev);
1964 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 2024 struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
1965 struct nouveau_encoder *nv_encoder; 2025 struct nouveau_encoder *nv_encoder;
1966 struct drm_encoder *encoder; 2026 struct drm_encoder *encoder;
1967 int type; 2027 int type;
@@ -2002,9 +2062,19 @@ nv50_pior_dpms(struct drm_encoder *encoder, int mode)
2002{ 2062{
2003 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 2063 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2004 struct nv50_disp *disp = nv50_disp(encoder->dev); 2064 struct nv50_disp *disp = nv50_disp(encoder->dev);
2005 u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or; 2065 struct {
2006 u32 ctrl = (mode == DRM_MODE_DPMS_ON); 2066 struct nv50_disp_mthd_v1 base;
2007 nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl); 2067 struct nv50_disp_pior_pwr_v0 pwr;
2068 } args = {
2069 .base.version = 1,
2070 .base.method = NV50_DISP_MTHD_V1_PIOR_PWR,
2071 .base.hasht = nv_encoder->dcb->hasht,
2072 .base.hashm = nv_encoder->dcb->hashm,
2073 .pwr.state = mode == DRM_MODE_DPMS_ON,
2074 .pwr.type = nv_encoder->dcb->type,
2075 };
2076
2077 nvif_mthd(disp->disp, 0, &args, sizeof(args));
2008} 2078}
2009 2079
2010static bool 2080static bool
@@ -2067,7 +2137,7 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
2067 2137
2068 push = evo_wait(mast, 8); 2138 push = evo_wait(mast, 8);
2069 if (push) { 2139 if (push) {
2070 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 2140 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
2071 u32 ctrl = (depth << 16) | (proto << 8) | owner; 2141 u32 ctrl = (depth << 16) | (proto << 8) | owner;
2072 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 2142 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
2073 ctrl |= 0x00001000; 2143 ctrl |= 0x00001000;
@@ -2096,7 +2166,7 @@ nv50_pior_disconnect(struct drm_encoder *encoder)
2096 2166
2097 push = evo_wait(mast, 4); 2167 push = evo_wait(mast, 4);
2098 if (push) { 2168 if (push) {
2099 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 2169 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
2100 evo_mthd(push, 0x0700 + (or * 0x040), 1); 2170 evo_mthd(push, 0x0700 + (or * 0x040), 1);
2101 evo_data(push, 0x00000000); 2171 evo_data(push, 0x00000000);
2102 } 2172 }
@@ -2132,7 +2202,7 @@ static int
2132nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) 2202nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
2133{ 2203{
2134 struct nouveau_drm *drm = nouveau_drm(connector->dev); 2204 struct nouveau_drm *drm = nouveau_drm(connector->dev);
2135 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 2205 struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
2136 struct nouveau_i2c_port *ddc = NULL; 2206 struct nouveau_i2c_port *ddc = NULL;
2137 struct nouveau_encoder *nv_encoder; 2207 struct nouveau_encoder *nv_encoder;
2138 struct drm_encoder *encoder; 2208 struct drm_encoder *encoder;
@@ -2169,8 +2239,151 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
2169} 2239}
2170 2240
2171/****************************************************************************** 2241/******************************************************************************
2242 * Framebuffer
2243 *****************************************************************************/
2244
2245static void
2246nv50_fbdma_fini(struct nv50_fbdma *fbdma)
2247{
2248 int i;
2249 for (i = 0; i < ARRAY_SIZE(fbdma->base); i++)
2250 nvif_object_fini(&fbdma->base[i]);
2251 nvif_object_fini(&fbdma->core);
2252 list_del(&fbdma->head);
2253 kfree(fbdma);
2254}
2255
2256static int
2257nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kind)
2258{
2259 struct nouveau_drm *drm = nouveau_drm(dev);
2260 struct nv50_disp *disp = nv50_disp(dev);
2261 struct nv50_mast *mast = nv50_mast(dev);
2262 struct __attribute__ ((packed)) {
2263 struct nv_dma_v0 base;
2264 union {
2265 struct nv50_dma_v0 nv50;
2266 struct gf100_dma_v0 gf100;
2267 struct gf110_dma_v0 gf110;
2268 };
2269 } args = {};
2270 struct nv50_fbdma *fbdma;
2271 struct drm_crtc *crtc;
2272 u32 size = sizeof(args.base);
2273 int ret;
2274
2275 list_for_each_entry(fbdma, &disp->fbdma, head) {
2276 if (fbdma->core.handle == name)
2277 return 0;
2278 }
2279
2280 fbdma = kzalloc(sizeof(*fbdma), GFP_KERNEL);
2281 if (!fbdma)
2282 return -ENOMEM;
2283 list_add(&fbdma->head, &disp->fbdma);
2284
2285 args.base.target = NV_DMA_V0_TARGET_VRAM;
2286 args.base.access = NV_DMA_V0_ACCESS_RDWR;
2287 args.base.start = offset;
2288 args.base.limit = offset + length - 1;
2289
2290 if (drm->device.info.chipset < 0x80) {
2291 args.nv50.part = NV50_DMA_V0_PART_256;
2292 size += sizeof(args.nv50);
2293 } else
2294 if (drm->device.info.chipset < 0xc0) {
2295 args.nv50.part = NV50_DMA_V0_PART_256;
2296 args.nv50.kind = kind;
2297 size += sizeof(args.nv50);
2298 } else
2299 if (drm->device.info.chipset < 0xd0) {
2300 args.gf100.kind = kind;
2301 size += sizeof(args.gf100);
2302 } else {
2303 args.gf110.page = GF110_DMA_V0_PAGE_LP;
2304 args.gf110.kind = kind;
2305 size += sizeof(args.gf110);
2306 }
2307
2308 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2309 struct nv50_head *head = nv50_head(crtc);
2310 int ret = nvif_object_init(&head->sync.base.base.user, NULL,
2311 name, NV_DMA_IN_MEMORY, &args, size,
2312 &fbdma->base[head->base.index]);
2313 if (ret) {
2314 nv50_fbdma_fini(fbdma);
2315 return ret;
2316 }
2317 }
2318
2319 ret = nvif_object_init(&mast->base.base.user, NULL, name,
2320 NV_DMA_IN_MEMORY, &args, size,
2321 &fbdma->core);
2322 if (ret) {
2323 nv50_fbdma_fini(fbdma);
2324 return ret;
2325 }
2326
2327 return 0;
2328}
2329
2330static void
2331nv50_fb_dtor(struct drm_framebuffer *fb)
2332{
2333}
2334
2335static int
2336nv50_fb_ctor(struct drm_framebuffer *fb)
2337{
2338 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
2339 struct nouveau_drm *drm = nouveau_drm(fb->dev);
2340 struct nouveau_bo *nvbo = nv_fb->nvbo;
2341 struct nv50_disp *disp = nv50_disp(fb->dev);
2342 u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
2343 u8 tile = nvbo->tile_mode;
2344
2345 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
2346 NV_ERROR(drm, "framebuffer requires contiguous bo\n");
2347 return -EINVAL;
2348 }
2349
2350 if (drm->device.info.chipset >= 0xc0)
2351 tile >>= 4; /* yep.. */
2352
2353 switch (fb->depth) {
2354 case 8: nv_fb->r_format = 0x1e00; break;
2355 case 15: nv_fb->r_format = 0xe900; break;
2356 case 16: nv_fb->r_format = 0xe800; break;
2357 case 24:
2358 case 32: nv_fb->r_format = 0xcf00; break;
2359 case 30: nv_fb->r_format = 0xd100; break;
2360 default:
2361 NV_ERROR(drm, "unknown depth %d\n", fb->depth);
2362 return -EINVAL;
2363 }
2364
2365 if (disp->disp->oclass < G82_DISP) {
2366 nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
2367 (fb->pitches[0] | 0x00100000);
2368 nv_fb->r_format |= kind << 16;
2369 } else
2370 if (disp->disp->oclass < GF110_DISP) {
2371 nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
2372 (fb->pitches[0] | 0x00100000);
2373 } else {
2374 nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
2375 (fb->pitches[0] | 0x01000000);
2376 }
2377 nv_fb->r_handle = 0xffff0000 | kind;
2378
2379 return nv50_fbdma_init(fb->dev, nv_fb->r_handle, 0,
2380 drm->device.info.ram_user, kind);
2381}
2382
2383/******************************************************************************
2172 * Init 2384 * Init
2173 *****************************************************************************/ 2385 *****************************************************************************/
2386
2174void 2387void
2175nv50_display_fini(struct drm_device *dev) 2388nv50_display_fini(struct drm_device *dev)
2176{ 2389{
@@ -2193,7 +2406,7 @@ nv50_display_init(struct drm_device *dev)
2193 } 2406 }
2194 2407
2195 evo_mthd(push, 0x0088, 1); 2408 evo_mthd(push, 0x0088, 1);
2196 evo_data(push, NvEvoSync); 2409 evo_data(push, nv50_mast(dev)->base.sync.handle);
2197 evo_kick(push, nv50_mast(dev)); 2410 evo_kick(push, nv50_mast(dev));
2198 return 0; 2411 return 0;
2199} 2412}
@@ -2202,8 +2415,13 @@ void
2202nv50_display_destroy(struct drm_device *dev) 2415nv50_display_destroy(struct drm_device *dev)
2203{ 2416{
2204 struct nv50_disp *disp = nv50_disp(dev); 2417 struct nv50_disp *disp = nv50_disp(dev);
2418 struct nv50_fbdma *fbdma, *fbtmp;
2419
2420 list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) {
2421 nv50_fbdma_fini(fbdma);
2422 }
2205 2423
2206 nv50_dmac_destroy(disp->core, &disp->mast.base); 2424 nv50_dmac_destroy(&disp->mast.base, disp->disp);
2207 2425
2208 nouveau_bo_unmap(disp->sync); 2426 nouveau_bo_unmap(disp->sync);
2209 if (disp->sync) 2427 if (disp->sync)
@@ -2217,7 +2435,7 @@ nv50_display_destroy(struct drm_device *dev)
2217int 2435int
2218nv50_display_create(struct drm_device *dev) 2436nv50_display_create(struct drm_device *dev)
2219{ 2437{
2220 struct nouveau_device *device = nouveau_dev(dev); 2438 struct nvif_device *device = &nouveau_drm(dev)->device;
2221 struct nouveau_drm *drm = nouveau_drm(dev); 2439 struct nouveau_drm *drm = nouveau_drm(dev);
2222 struct dcb_table *dcb = &drm->vbios.dcb; 2440 struct dcb_table *dcb = &drm->vbios.dcb;
2223 struct drm_connector *connector, *tmp; 2441 struct drm_connector *connector, *tmp;
@@ -2228,12 +2446,15 @@ nv50_display_create(struct drm_device *dev)
2228 disp = kzalloc(sizeof(*disp), GFP_KERNEL); 2446 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
2229 if (!disp) 2447 if (!disp)
2230 return -ENOMEM; 2448 return -ENOMEM;
2449 INIT_LIST_HEAD(&disp->fbdma);
2231 2450
2232 nouveau_display(dev)->priv = disp; 2451 nouveau_display(dev)->priv = disp;
2233 nouveau_display(dev)->dtor = nv50_display_destroy; 2452 nouveau_display(dev)->dtor = nv50_display_destroy;
2234 nouveau_display(dev)->init = nv50_display_init; 2453 nouveau_display(dev)->init = nv50_display_init;
2235 nouveau_display(dev)->fini = nv50_display_fini; 2454 nouveau_display(dev)->fini = nv50_display_fini;
2236 disp->core = nouveau_display(dev)->core; 2455 nouveau_display(dev)->fb_ctor = nv50_fb_ctor;
2456 nouveau_display(dev)->fb_dtor = nv50_fb_dtor;
2457 disp->disp = &nouveau_display(dev)->disp;
2237 2458
2238 /* small shared memory area we use for notifiers and semaphores */ 2459 /* small shared memory area we use for notifiers and semaphores */
2239 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 2460 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2253,22 +2474,19 @@ nv50_display_create(struct drm_device *dev)
2253 goto out; 2474 goto out;
2254 2475
2255 /* allocate master evo channel */ 2476 /* allocate master evo channel */
2256 ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0, 2477 ret = nv50_core_create(disp->disp, disp->sync->bo.offset,
2257 &(struct nv50_display_mast_class) { 2478 &disp->mast);
2258 .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
2259 }, sizeof(struct nv50_display_mast_class),
2260 disp->sync->bo.offset, &disp->mast.base);
2261 if (ret) 2479 if (ret)
2262 goto out; 2480 goto out;
2263 2481
2264 /* create crtc objects to represent the hw heads */ 2482 /* create crtc objects to represent the hw heads */
2265 if (nv_mclass(disp->core) >= NVD0_DISP_CLASS) 2483 if (disp->disp->oclass >= GF110_DISP)
2266 crtcs = nv_rd32(device, 0x022448); 2484 crtcs = nvif_rd32(device, 0x022448);
2267 else 2485 else
2268 crtcs = 2; 2486 crtcs = 2;
2269 2487
2270 for (i = 0; i < crtcs; i++) { 2488 for (i = 0; i < crtcs; i++) {
2271 ret = nv50_crtc_create(dev, disp->core, i); 2489 ret = nv50_crtc_create(dev, i);
2272 if (ret) 2490 if (ret)
2273 goto out; 2491 goto out;
2274 } 2492 }
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 52068a0910dc..394c89abcc97 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -154,7 +154,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
154 struct drm_device *dev = nfbdev->dev; 154 struct drm_device *dev = nfbdev->dev;
155 struct nouveau_drm *drm = nouveau_drm(dev); 155 struct nouveau_drm *drm = nouveau_drm(dev);
156 struct nouveau_channel *chan = drm->channel; 156 struct nouveau_channel *chan = drm->channel;
157 struct nouveau_object *object;
158 int ret, format; 157 int ret, format;
159 158
160 switch (info->var.bits_per_pixel) { 159 switch (info->var.bits_per_pixel) {
@@ -184,8 +183,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
184 return -EINVAL; 183 return -EINVAL;
185 } 184 }
186 185
187 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D, 186 ret = nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0,
188 0x502d, NULL, 0, &object); 187 &nfbdev->twod);
189 if (ret) 188 if (ret)
190 return ret; 189 return ret;
191 190
@@ -196,11 +195,11 @@ nv50_fbcon_accel_init(struct fb_info *info)
196 } 195 }
197 196
198 BEGIN_NV04(chan, NvSub2D, 0x0000, 1); 197 BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
199 OUT_RING(chan, Nv2D); 198 OUT_RING(chan, nfbdev->twod.handle);
200 BEGIN_NV04(chan, NvSub2D, 0x0184, 3); 199 BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
201 OUT_RING(chan, NvDmaFB); 200 OUT_RING(chan, chan->vram.handle);
202 OUT_RING(chan, NvDmaFB); 201 OUT_RING(chan, chan->vram.handle);
203 OUT_RING(chan, NvDmaFB); 202 OUT_RING(chan, chan->vram.handle);
204 BEGIN_NV04(chan, NvSub2D, 0x0290, 1); 203 BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
205 OUT_RING(chan, 0); 204 OUT_RING(chan, 0);
206 BEGIN_NV04(chan, NvSub2D, 0x0888, 1); 205 BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 0ee363840035..195cf51a7c31 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -22,8 +22,8 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include <core/object.h> 25#include <nvif/os.h>
26#include <core/class.h> 26#include <nvif/class.h>
27 27
28#include "nouveau_drm.h" 28#include "nouveau_drm.h"
29#include "nouveau_dma.h" 29#include "nouveau_dma.h"
@@ -38,7 +38,6 @@ nv50_fence_context_new(struct nouveau_channel *chan)
38 struct nv10_fence_priv *priv = chan->drm->fence; 38 struct nv10_fence_priv *priv = chan->drm->fence;
39 struct nv10_fence_chan *fctx; 39 struct nv10_fence_chan *fctx;
40 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 40 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
41 struct nouveau_object *object;
42 u32 start = mem->start * PAGE_SIZE; 41 u32 start = mem->start * PAGE_SIZE;
43 u32 limit = start + mem->size - 1; 42 u32 limit = start + mem->size - 1;
44 int ret, i; 43 int ret, i;
@@ -52,15 +51,14 @@ nv50_fence_context_new(struct nouveau_channel *chan)
52 fctx->base.read = nv10_fence_read; 51 fctx->base.read = nv10_fence_read;
53 fctx->base.sync = nv17_fence_sync; 52 fctx->base.sync = nv17_fence_sync;
54 53
55 ret = nouveau_object_new(nv_object(chan->cli), chan->handle, 54 ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY,
56 NvSema, 0x003d, 55 &(struct nv_dma_v0) {
57 &(struct nv_dma_class) { 56 .target = NV_DMA_V0_TARGET_VRAM,
58 .flags = NV_DMA_TARGET_VRAM | 57 .access = NV_DMA_V0_ACCESS_RDWR,
59 NV_DMA_ACCESS_RDWR,
60 .start = start, 58 .start = start,
61 .limit = limit, 59 .limit = limit,
62 }, sizeof(struct nv_dma_class), 60 }, sizeof(struct nv_dma_v0),
63 &object); 61 &fctx->sema);
64 62
65 /* dma objects for display sync channel semaphore blocks */ 63 /* dma objects for display sync channel semaphore blocks */
66 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { 64 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
@@ -68,15 +66,14 @@ nv50_fence_context_new(struct nouveau_channel *chan)
68 u32 start = bo->bo.mem.start * PAGE_SIZE; 66 u32 start = bo->bo.mem.start * PAGE_SIZE;
69 u32 limit = start + bo->bo.mem.size - 1; 67 u32 limit = start + bo->bo.mem.size - 1;
70 68
71 ret = nouveau_object_new(nv_object(chan->cli), chan->handle, 69 ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i,
72 NvEvoSema0 + i, 0x003d, 70 NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
73 &(struct nv_dma_class) { 71 .target = NV_DMA_V0_TARGET_VRAM,
74 .flags = NV_DMA_TARGET_VRAM | 72 .access = NV_DMA_V0_ACCESS_RDWR,
75 NV_DMA_ACCESS_RDWR,
76 .start = start, 73 .start = start,
77 .limit = limit, 74 .limit = limit,
78 }, sizeof(struct nv_dma_class), 75 }, sizeof(struct nv_dma_v0),
79 &object); 76 &fctx->head[i]);
80 } 77 }
81 78
82 if (ret) 79 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 9fd475c89820..933a779c93ab 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -22,12 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/object.h>
26#include <core/client.h>
27#include <core/class.h>
28
29#include <engine/fifo.h>
30
31#include "nouveau_drm.h" 25#include "nouveau_drm.h"
32#include "nouveau_dma.h" 26#include "nouveau_dma.h"
33#include "nouveau_fence.h" 27#include "nouveau_fence.h"
@@ -47,7 +41,7 @@ nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
47 int ret = RING_SPACE(chan, 8); 41 int ret = RING_SPACE(chan, 8);
48 if (ret == 0) { 42 if (ret == 0) {
49 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 43 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
50 OUT_RING (chan, chan->vram); 44 OUT_RING (chan, chan->vram.handle);
51 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5); 45 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
52 OUT_RING (chan, upper_32_bits(virtual)); 46 OUT_RING (chan, upper_32_bits(virtual));
53 OUT_RING (chan, lower_32_bits(virtual)); 47 OUT_RING (chan, lower_32_bits(virtual));
@@ -65,7 +59,7 @@ nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
65 int ret = RING_SPACE(chan, 7); 59 int ret = RING_SPACE(chan, 7);
66 if (ret == 0) { 60 if (ret == 0) {
67 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 61 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
68 OUT_RING (chan, chan->vram); 62 OUT_RING (chan, chan->vram.handle);
69 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 63 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
70 OUT_RING (chan, upper_32_bits(virtual)); 64 OUT_RING (chan, upper_32_bits(virtual));
71 OUT_RING (chan, lower_32_bits(virtual)); 65 OUT_RING (chan, lower_32_bits(virtual));
@@ -81,8 +75,7 @@ nv84_fence_emit(struct nouveau_fence *fence)
81{ 75{
82 struct nouveau_channel *chan = fence->channel; 76 struct nouveau_channel *chan = fence->channel;
83 struct nv84_fence_chan *fctx = chan->fence; 77 struct nv84_fence_chan *fctx = chan->fence;
84 struct nouveau_fifo_chan *fifo = (void *)chan->object; 78 u64 addr = chan->chid * 16;
85 u64 addr = fifo->chid * 16;
86 79
87 if (fence->sysmem) 80 if (fence->sysmem)
88 addr += fctx->vma_gart.offset; 81 addr += fctx->vma_gart.offset;
@@ -97,8 +90,7 @@ nv84_fence_sync(struct nouveau_fence *fence,
97 struct nouveau_channel *prev, struct nouveau_channel *chan) 90 struct nouveau_channel *prev, struct nouveau_channel *chan)
98{ 91{
99 struct nv84_fence_chan *fctx = chan->fence; 92 struct nv84_fence_chan *fctx = chan->fence;
100 struct nouveau_fifo_chan *fifo = (void *)prev->object; 93 u64 addr = prev->chid * 16;
101 u64 addr = fifo->chid * 16;
102 94
103 if (fence->sysmem) 95 if (fence->sysmem)
104 addr += fctx->vma_gart.offset; 96 addr += fctx->vma_gart.offset;
@@ -111,9 +103,8 @@ nv84_fence_sync(struct nouveau_fence *fence,
111static u32 103static u32
112nv84_fence_read(struct nouveau_channel *chan) 104nv84_fence_read(struct nouveau_channel *chan)
113{ 105{
114 struct nouveau_fifo_chan *fifo = (void *)chan->object;
115 struct nv84_fence_priv *priv = chan->drm->fence; 106 struct nv84_fence_priv *priv = chan->drm->fence;
116 return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4); 107 return nouveau_bo_rd32(priv->bo, chan->chid * 16/4);
117} 108}
118 109
119static void 110static void
@@ -139,8 +130,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
139int 130int
140nv84_fence_context_new(struct nouveau_channel *chan) 131nv84_fence_context_new(struct nouveau_channel *chan)
141{ 132{
142 struct nouveau_fifo_chan *fifo = (void *)chan->object; 133 struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
143 struct nouveau_client *client = nouveau_client(fifo);
144 struct nv84_fence_priv *priv = chan->drm->fence; 134 struct nv84_fence_priv *priv = chan->drm->fence;
145 struct nv84_fence_chan *fctx; 135 struct nv84_fence_chan *fctx;
146 int ret, i; 136 int ret, i;
@@ -156,19 +146,19 @@ nv84_fence_context_new(struct nouveau_channel *chan)
156 fctx->base.emit32 = nv84_fence_emit32; 146 fctx->base.emit32 = nv84_fence_emit32;
157 fctx->base.sync32 = nv84_fence_sync32; 147 fctx->base.sync32 = nv84_fence_sync32;
158 148
159 ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma); 149 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
160 if (ret == 0) { 150 if (ret == 0) {
161 ret = nouveau_bo_vma_add(priv->bo_gart, client->vm, 151 ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
162 &fctx->vma_gart); 152 &fctx->vma_gart);
163 } 153 }
164 154
165 /* map display semaphore buffers into channel's vm */ 155 /* map display semaphore buffers into channel's vm */
166 for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { 156 for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
167 struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i); 157 struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
168 ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]); 158 ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
169 } 159 }
170 160
171 nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000); 161 nouveau_bo_wr32(priv->bo, chan->chid * 16/4, 0x00000000);
172 162
173 if (ret) 163 if (ret)
174 nv84_fence_context_del(chan); 164 nv84_fence_context_del(chan);
@@ -178,7 +168,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
178static bool 168static bool
179nv84_fence_suspend(struct nouveau_drm *drm) 169nv84_fence_suspend(struct nouveau_drm *drm)
180{ 170{
181 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); 171 struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
182 struct nv84_fence_priv *priv = drm->fence; 172 struct nv84_fence_priv *priv = drm->fence;
183 int i; 173 int i;
184 174
@@ -194,7 +184,7 @@ nv84_fence_suspend(struct nouveau_drm *drm)
194static void 184static void
195nv84_fence_resume(struct nouveau_drm *drm) 185nv84_fence_resume(struct nouveau_drm *drm)
196{ 186{
197 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); 187 struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
198 struct nv84_fence_priv *priv = drm->fence; 188 struct nv84_fence_priv *priv = drm->fence;
199 int i; 189 int i;
200 190
@@ -225,7 +215,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
225int 215int
226nv84_fence_create(struct nouveau_drm *drm) 216nv84_fence_create(struct nouveau_drm *drm)
227{ 217{
228 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); 218 struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
229 struct nv84_fence_priv *priv; 219 struct nv84_fence_priv *priv;
230 int ret; 220 int ret;
231 221
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 9dcd30f3e1e0..61246677e8dc 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -154,11 +154,10 @@ nvc0_fbcon_accel_init(struct fb_info *info)
154 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; 154 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
155 struct nouveau_drm *drm = nouveau_drm(dev); 155 struct nouveau_drm *drm = nouveau_drm(dev);
156 struct nouveau_channel *chan = drm->channel; 156 struct nouveau_channel *chan = drm->channel;
157 struct nouveau_object *object;
158 int ret, format; 157 int ret, format;
159 158
160 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D, 159 ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0,
161 0x902d, NULL, 0, &object); 160 &nfbdev->twod);
162 if (ret) 161 if (ret)
163 return ret; 162 return ret;
164 163
@@ -197,7 +196,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
197 } 196 }
198 197
199 BEGIN_NVC0(chan, NvSub2D, 0x0000, 1); 198 BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
200 OUT_RING (chan, 0x0000902d); 199 OUT_RING (chan, nfbdev->twod.handle);
201 BEGIN_NVC0(chan, NvSub2D, 0x0290, 1); 200 BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
202 OUT_RING (chan, 0); 201 OUT_RING (chan, 0);
203 BEGIN_NVC0(chan, NvSub2D, 0x0888, 1); 202 BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 9566267fbc42..becf19abda2d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -22,12 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/object.h>
26#include <core/client.h>
27#include <core/class.h>
28
29#include <engine/fifo.h>
30
31#include "nouveau_drm.h" 25#include "nouveau_drm.h"
32#include "nouveau_dma.h" 26#include "nouveau_dma.h"
33#include "nouveau_fence.h" 27#include "nouveau_fence.h"
diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/nvif/class.h
new file mode 100644
index 000000000000..cc81e0e5fd30
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/class.h
@@ -0,0 +1,558 @@
1#ifndef __NVIF_CLASS_H__
2#define __NVIF_CLASS_H__
3
4/*******************************************************************************
5 * class identifiers
6 ******************************************************************************/
7
8/* the below match nvidia-assigned (either in hw, or sw) class numbers */
9#define NV_DEVICE 0x00000080
10
11#define NV_DMA_FROM_MEMORY 0x00000002
12#define NV_DMA_TO_MEMORY 0x00000003
13#define NV_DMA_IN_MEMORY 0x0000003d
14
15#define NV04_DISP 0x00000046
16
17#define NV03_CHANNEL_DMA 0x0000006b
18#define NV10_CHANNEL_DMA 0x0000006e
19#define NV17_CHANNEL_DMA 0x0000176e
20#define NV40_CHANNEL_DMA 0x0000406e
21#define NV50_CHANNEL_DMA 0x0000506e
22#define G82_CHANNEL_DMA 0x0000826e
23
24#define NV50_CHANNEL_GPFIFO 0x0000506f
25#define G82_CHANNEL_GPFIFO 0x0000826f
26#define FERMI_CHANNEL_GPFIFO 0x0000906f
27#define KEPLER_CHANNEL_GPFIFO_A 0x0000a06f
28
29#define NV50_DISP 0x00005070
30#define G82_DISP 0x00008270
31#define GT200_DISP 0x00008370
32#define GT214_DISP 0x00008570
33#define GT206_DISP 0x00008870
34#define GF110_DISP 0x00009070
35#define GK104_DISP 0x00009170
36#define GK110_DISP 0x00009270
37#define GM107_DISP 0x00009470
38
39#define NV50_DISP_CURSOR 0x0000507a
40#define G82_DISP_CURSOR 0x0000827a
41#define GT214_DISP_CURSOR 0x0000857a
42#define GF110_DISP_CURSOR 0x0000907a
43#define GK104_DISP_CURSOR 0x0000917a
44
45#define NV50_DISP_OVERLAY 0x0000507b
46#define G82_DISP_OVERLAY 0x0000827b
47#define GT214_DISP_OVERLAY 0x0000857b
48#define GF110_DISP_OVERLAY 0x0000907b
49#define GK104_DISP_OVERLAY 0x0000917b
50
51#define NV50_DISP_BASE_CHANNEL_DMA 0x0000507c
52#define G82_DISP_BASE_CHANNEL_DMA 0x0000827c
53#define GT200_DISP_BASE_CHANNEL_DMA 0x0000837c
54#define GT214_DISP_BASE_CHANNEL_DMA 0x0000857c
55#define GF110_DISP_BASE_CHANNEL_DMA 0x0000907c
56#define GK104_DISP_BASE_CHANNEL_DMA 0x0000917c
57#define GK110_DISP_BASE_CHANNEL_DMA 0x0000927c
58
59#define NV50_DISP_CORE_CHANNEL_DMA 0x0000507d
60#define G82_DISP_CORE_CHANNEL_DMA 0x0000827d
61#define GT200_DISP_CORE_CHANNEL_DMA 0x0000837d
62#define GT214_DISP_CORE_CHANNEL_DMA 0x0000857d
63#define GT206_DISP_CORE_CHANNEL_DMA 0x0000887d
64#define GF110_DISP_CORE_CHANNEL_DMA 0x0000907d
65#define GK104_DISP_CORE_CHANNEL_DMA 0x0000917d
66#define GK110_DISP_CORE_CHANNEL_DMA 0x0000927d
67#define GM107_DISP_CORE_CHANNEL_DMA 0x0000947d
68
69#define NV50_DISP_OVERLAY_CHANNEL_DMA 0x0000507e
70#define G82_DISP_OVERLAY_CHANNEL_DMA 0x0000827e
71#define GT200_DISP_OVERLAY_CHANNEL_DMA 0x0000837e
72#define GT214_DISP_OVERLAY_CHANNEL_DMA 0x0000857e
73#define GF110_DISP_OVERLAY_CONTROL_DMA 0x0000907e
74#define GK104_DISP_OVERLAY_CONTROL_DMA 0x0000917e
75
76#define FERMI_A 0x00009097
77#define FERMI_B 0x00009197
78#define FERMI_C 0x00009297
79
80#define KEPLER_A 0x0000a097
81#define KEPLER_B 0x0000a197
82#define KEPLER_C 0x0000a297
83
84#define MAXWELL_A 0x0000b097
85
86#define FERMI_COMPUTE_A 0x000090c0
87#define FERMI_COMPUTE_B 0x000091c0
88
89#define KEPLER_COMPUTE_A 0x0000a0c0
90#define KEPLER_COMPUTE_B 0x0000a1c0
91
92#define MAXWELL_COMPUTE_A 0x0000b0c0
93
94
95/*******************************************************************************
96 * client
97 ******************************************************************************/
98
99#define NV_CLIENT_DEVLIST 0x00
100
101struct nv_client_devlist_v0 {
102 __u8 version;
103 __u8 count;
104 __u8 pad02[6];
105 __u64 device[];
106};
107
108
109/*******************************************************************************
110 * device
111 ******************************************************************************/
112
113struct nv_device_v0 {
114 __u8 version;
115 __u8 pad01[7];
116 __u64 device; /* device identifier, ~0 for client default */
117#define NV_DEVICE_V0_DISABLE_IDENTIFY 0x0000000000000001ULL
118#define NV_DEVICE_V0_DISABLE_MMIO 0x0000000000000002ULL
119#define NV_DEVICE_V0_DISABLE_VBIOS 0x0000000000000004ULL
120#define NV_DEVICE_V0_DISABLE_CORE 0x0000000000000008ULL
121#define NV_DEVICE_V0_DISABLE_DISP 0x0000000000010000ULL
122#define NV_DEVICE_V0_DISABLE_FIFO 0x0000000000020000ULL
123#define NV_DEVICE_V0_DISABLE_GRAPH 0x0000000100000000ULL
124#define NV_DEVICE_V0_DISABLE_MPEG 0x0000000200000000ULL
125#define NV_DEVICE_V0_DISABLE_ME 0x0000000400000000ULL
126#define NV_DEVICE_V0_DISABLE_VP 0x0000000800000000ULL
127#define NV_DEVICE_V0_DISABLE_CRYPT 0x0000001000000000ULL
128#define NV_DEVICE_V0_DISABLE_BSP 0x0000002000000000ULL
129#define NV_DEVICE_V0_DISABLE_PPP 0x0000004000000000ULL
130#define NV_DEVICE_V0_DISABLE_COPY0 0x0000008000000000ULL
131#define NV_DEVICE_V0_DISABLE_COPY1 0x0000010000000000ULL
132#define NV_DEVICE_V0_DISABLE_VIC 0x0000020000000000ULL
133#define NV_DEVICE_V0_DISABLE_VENC 0x0000040000000000ULL
134 __u64 disable; /* disable particular subsystems */
135 __u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
136};
137
138#define NV_DEVICE_V0_INFO 0x00
139
140struct nv_device_info_v0 {
141 __u8 version;
142#define NV_DEVICE_INFO_V0_IGP 0x00
143#define NV_DEVICE_INFO_V0_PCI 0x01
144#define NV_DEVICE_INFO_V0_AGP 0x02
145#define NV_DEVICE_INFO_V0_PCIE 0x03
146#define NV_DEVICE_INFO_V0_SOC 0x04
147 __u8 platform;
148 __u16 chipset; /* from NV_PMC_BOOT_0 */
149 __u8 revision; /* from NV_PMC_BOOT_0 */
150#define NV_DEVICE_INFO_V0_TNT 0x01
151#define NV_DEVICE_INFO_V0_CELSIUS 0x02
152#define NV_DEVICE_INFO_V0_KELVIN 0x03
153#define NV_DEVICE_INFO_V0_RANKINE 0x04
154#define NV_DEVICE_INFO_V0_CURIE 0x05
155#define NV_DEVICE_INFO_V0_TESLA 0x06
156#define NV_DEVICE_INFO_V0_FERMI 0x07
157#define NV_DEVICE_INFO_V0_KEPLER 0x08
158#define NV_DEVICE_INFO_V0_MAXWELL 0x09
159 __u8 family;
160 __u8 pad06[2];
161 __u64 ram_size;
162 __u64 ram_user;
163};
164
165
166/*******************************************************************************
167 * context dma
168 ******************************************************************************/
169
/* Base arguments for creating a DMA (context dma) object: the memory
 * target/aperture, access mode, and the [start, limit] byte range it
 * covers.  Chipset-specific argument structures follow this header.
 */
struct nv_dma_v0 {
	__u8 version;
/* Which address space/aperture the object maps. */
#define NV_DMA_V0_TARGET_VM                                                0x00
#define NV_DMA_V0_TARGET_VRAM                                              0x01
#define NV_DMA_V0_TARGET_PCI                                               0x02
#define NV_DMA_V0_TARGET_PCI_US                                            0x03
#define NV_DMA_V0_TARGET_AGP                                               0x04
	__u8 target;
/* Permitted access through the object. */
#define NV_DMA_V0_ACCESS_VM                                                0x00
#define NV_DMA_V0_ACCESS_RD                                                0x01
#define NV_DMA_V0_ACCESS_WR                                                0x02
#define NV_DMA_V0_ACCESS_RDWR                 (NV_DMA_V0_ACCESS_RD | NV_DMA_V0_ACCESS_WR)
	__u8 access;
	__u8 pad03[5];
	__u64 start;	/* first byte covered */
	__u64 limit;	/* last byte covered (inclusive) */
	/* ... chipset-specific class data */
};
188
/* NV50-family chipset-specific DMA object arguments: privilege level,
 * page partition size, compression mode and storage kind.
 */
struct nv50_dma_v0 {
	__u8 version;
#define NV50_DMA_V0_PRIV_VM                                                0x00
#define NV50_DMA_V0_PRIV_US                                                0x01
#define NV50_DMA_V0_PRIV__S                                                0x02
	__u8 priv;
#define NV50_DMA_V0_PART_VM                                                0x00
#define NV50_DMA_V0_PART_256                                               0x01
#define NV50_DMA_V0_PART_1KB                                               0x02
	__u8 part;
#define NV50_DMA_V0_COMP_NONE                                              0x00
#define NV50_DMA_V0_COMP_1                                                 0x01
#define NV50_DMA_V0_COMP_2                                                 0x02
#define NV50_DMA_V0_COMP_VM                                                0x03
	__u8 comp;
#define NV50_DMA_V0_KIND_PITCH                                             0x00
#define NV50_DMA_V0_KIND_VM                                                0x7f
	__u8 kind;	/* memory storage kind (tiling format) */
	__u8 pad05[3];
};

/* GF100 (Fermi) chipset-specific DMA object arguments. */
struct gf100_dma_v0 {
	__u8 version;
#define GF100_DMA_V0_PRIV_VM                                               0x00
#define GF100_DMA_V0_PRIV_US                                               0x01
#define GF100_DMA_V0_PRIV__S                                               0x02
	__u8 priv;
#define GF100_DMA_V0_KIND_PITCH                                            0x00
#define GF100_DMA_V0_KIND_VM                                               0xff
	__u8 kind;
	__u8 pad03[5];
};

/* GF110+ chipset-specific DMA object arguments (page size replaces priv). */
struct gf110_dma_v0 {
	__u8 version;
#define GF110_DMA_V0_PAGE_LP                                               0x00
#define GF110_DMA_V0_PAGE_SP                                               0x01
	__u8 page;	/* large or small page mapping */
#define GF110_DMA_V0_KIND_PITCH                                            0x00
#define GF110_DMA_V0_KIND_VM                                               0xff
	__u8 kind;
	__u8 pad03[5];
};
232
233
234/*******************************************************************************
235 * perfmon
236 ******************************************************************************/
237
/* Arguments for creating a performance-counter object: a logic op combining
 * up to four named hardware signals.
 */
struct nvif_perfctr_v0 {
	__u8  version;
	__u8  pad01[1];
	__u16 logic_op;		/* combiner applied to the selected signals */
	__u8  pad04[4];
	char  name[4][64];	/* up to four signal names to monitor */
};

/* Methods on a perfctr object. */
#define NVIF_PERFCTR_V0_QUERY                                              0x00
#define NVIF_PERFCTR_V0_SAMPLE                                             0x01
#define NVIF_PERFCTR_V0_READ                                               0x02

/* QUERY: iterate available signal names; pass back the returned iter to
 * fetch the next entry.
 */
struct nvif_perfctr_query_v0 {
	__u8  version;
	__u8  pad01[3];
	__u32 iter;		/* in/out: iteration cookie */
	char  name[64];		/* out: signal name */
};

/* SAMPLE: latch current counter values (no arguments). */
struct nvif_perfctr_sample {
};

/* READ: fetch the most recently sampled counter and clock values. */
struct nvif_perfctr_read_v0 {
	__u8  version;
	__u8  pad01[7];
	__u32 ctr;		/* out: counter value */
	__u32 clk;		/* out: clock cycles over the sample period */
};
266
267
268/*******************************************************************************
269 * device control
270 ******************************************************************************/
271
272#define NVIF_CONTROL_PSTATE_INFO 0x00
273#define NVIF_CONTROL_PSTATE_ATTR 0x01
274#define NVIF_CONTROL_PSTATE_USER 0x02
275
276struct nvif_control_pstate_info_v0 {
277 __u8 version;
278 __u8 count; /* out: number of power states */
279#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE (-1)
280#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_PERFMON (-2)
281 __s8 ustate_ac; /* out: target pstate index */
282 __s8 ustate_dc; /* out: target pstate index */
283 __s8 pwrsrc; /* out: current power source */
284#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN (-1)
285#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_PERFMON (-2)
286 __s8 pstate; /* out: current pstate index */
287 __u8 pad06[2];
288};
289
290struct nvif_control_pstate_attr_v0 {
291 __u8 version;
292#define NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT (-1)
293 __s8 state; /* in: index of pstate to query
294 * out: pstate identifier
295 */
296 __u8 index; /* in: index of attribute to query
297 * out: index of next attribute, or 0 if no more
298 */
299 __u8 pad03[5];
300 __u32 min;
301 __u32 max;
302 char name[32];
303 char unit[16];
304};
305
306struct nvif_control_pstate_user_v0 {
307 __u8 version;
308#define NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN (-1)
309#define NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON (-2)
310 __s8 ustate; /* in: pstate identifier */
311 __s8 pwrsrc; /* in: target power source */
312 __u8 pad03[5];
313};
314
315
316/*******************************************************************************
317 * DMA FIFO channels
318 ******************************************************************************/
319
/* Arguments for creating an NV03-style DMA-mode FIFO channel. */
struct nv03_channel_dma_v0 {
	__u8  version;
	__u8  chid;		/* out: assigned channel id */
	__u8  pad02[2];
	__u32 pushbuf;		/* in: handle of pushbuffer dma object */
	__u64 offset;		/* in: initial GET/PUT offset */
};

/* Notifier index on G82 DMA channels: non-stalling interrupt event. */
#define G82_CHANNEL_DMA_V0_NTFY_UEVENT                                     0x00

/*******************************************************************************
 * GPFIFO channels
 ******************************************************************************/

/* Arguments for creating an NV50-style GPFIFO channel. */
struct nv50_channel_gpfifo_v0 {
	__u8  version;
	__u8  chid;		/* out: assigned channel id */
	__u8  pad01[6];
	__u32 pushbuf;		/* in: handle of pushbuffer dma object */
	__u32 ilength;		/* in: size of the ring buffer, bytes */
	__u64 ioffset;		/* in: GPU virtual address of the ring */
};

/* Arguments for creating a Kepler GPFIFO channel; the engine mask selects
 * which engines the channel's runlist services.
 */
struct kepler_channel_gpfifo_a_v0 {
	__u8  version;
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR                               0x01
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_VP                               0x02
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_PPP                              0x04
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_BSP                              0x08
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0                              0x10
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1                              0x20
#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC                              0x40
	__u8  engine;		/* in: bitmask of engines (above) */
	__u16 chid;		/* out: assigned channel id */
	__u8  pad04[4];
	__u32 pushbuf;		/* in: handle of pushbuffer dma object */
	__u32 ilength;		/* in: size of the ring buffer, bytes */
	__u64 ioffset;		/* in: GPU virtual address of the ring */
};
359
360/*******************************************************************************
361 * legacy display
362 ******************************************************************************/
363
364#define NV04_DISP_NTFY_VBLANK 0x00
365#define NV04_DISP_NTFY_CONN 0x01
366
/* Method header for the legacy (pre-NV50) display object. */
struct nv04_disp_mthd_v0 {
	__u8 version;
#define NV04_DISP_SCANOUTPOS                                               0x00
	__u8 method;
	__u8 head;		/* CRTC/head index the method applies to */
	__u8 pad03[5];
};

/* SCANOUTPOS reply: scanout position plus CRTC timing parameters, with
 * two timestamps bracketing the position sample.
 */
struct nv04_disp_scanoutpos_v0 {
	__u8 version;
	__u8 pad01[7];
	__s64 time[2];		/* timestamps taken before/after sampling */
	__u16 vblanks;		/* vertical blank start line */
	__u16 vblanke;		/* vertical blank end line */
	__u16 vtotal;		/* total lines per frame */
	__u16 vline;		/* current scanout line */
	__u16 hblanks;		/* horizontal blank start pixel */
	__u16 hblanke;		/* horizontal blank end pixel */
	__u16 htotal;		/* total pixels per line */
	__u16 hline;		/* current scanout pixel */
};
388
389/*******************************************************************************
390 * display
391 ******************************************************************************/
392
393#define NV50_DISP_MTHD 0x00
394
/* Method header (v0) for the NV50+ display object, addressed by head. */
struct nv50_disp_mthd_v0 {
	__u8 version;
#define NV50_DISP_SCANOUTPOS                                               0x00
	__u8 method;
	__u8 head;
	__u8 pad03[5];
};

/* Method header (v1): addressed by output (DCB hash type/mask) rather than
 * head; method-specific argument structures follow.
 */
struct nv50_disp_mthd_v1 {
	__u8 version;
#define NV50_DISP_MTHD_V1_DAC_PWR                                          0x10
#define NV50_DISP_MTHD_V1_DAC_LOAD                                         0x11
#define NV50_DISP_MTHD_V1_SOR_PWR                                          0x20
#define NV50_DISP_MTHD_V1_SOR_HDA_ELD                                      0x21
#define NV50_DISP_MTHD_V1_SOR_HDMI_PWR                                     0x22
#define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT                                  0x23
#define NV50_DISP_MTHD_V1_SOR_DP_PWR                                       0x24
#define NV50_DISP_MTHD_V1_PIOR_PWR                                         0x30
	__u8 method;
	__u16 hasht;		/* DCB output hash type */
	__u16 hashm;		/* DCB output hash mask */
	__u8 pad06[2];
};
418
/* DAC_PWR: set DAC power state and sync signal enables. */
struct nv50_disp_dac_pwr_v0 {
	__u8 version;
	__u8 state;
	__u8 data;
	__u8 vsync;
	__u8 hsync;
	__u8 pad05[3];
};

/* DAC_LOAD: perform analog load detection. */
struct nv50_disp_dac_load_v0 {
	__u8 version;
	__u8 load;
	__u16 data;
	__u8 pad04[4];
};

/* SOR_PWR: set serial output resource power state. */
struct nv50_disp_sor_pwr_v0 {
	__u8 version;
	__u8 state;
	__u8 pad02[6];
};

/* SOR_HDA_ELD: program the HD-Audio ELD (EDID-like data) for a SOR;
 * variable-length payload follows the header.
 */
struct nv50_disp_sor_hda_eld_v0 {
	__u8 version;
	__u8 pad01[7];
	__u8 data[];
};

/* SOR_HDMI_PWR: enable/disable HDMI protocol on a SOR. */
struct nv50_disp_sor_hdmi_pwr_v0 {
	__u8 version;
	__u8 state;
	__u8 max_ac_packet;	/* max audio/aux packet size */
	__u8 rekey;		/* HDCP rekey interval */
	__u8 pad04[4];
};

/* SOR_LVDS_SCRIPT: select the VBIOS LVDS init script to execute. */
struct nv50_disp_sor_lvds_script_v0 {
	__u8 version;
	__u8 pad01[1];
	__u16 script;
	__u8 pad04[4];
};

/* SOR_DP_PWR: set DisplayPort link power state. */
struct nv50_disp_sor_dp_pwr_v0 {
	__u8 version;
	__u8 state;
	__u8 pad02[6];
};

/* PIOR_PWR: set power state of an external (PIOR-attached) encoder. */
struct nv50_disp_pior_pwr_v0 {
	__u8 version;
	__u8 state;
	__u8 type;
	__u8 pad03[5];
};
474
/* core: arguments for the core (master) display channel. */
struct nv50_disp_core_channel_dma_v0 {
	__u8 version;
	__u8 pad01[3];
	__u32 pushbuf;		/* handle of pushbuffer dma object */
};

/* cursor immediate: per-head cursor channel (no pushbuffer). */
struct nv50_disp_cursor_v0 {
	__u8 version;
	__u8 head;
	__u8 pad02[6];
};

/* base: per-head base (primary plane) channel. */
struct nv50_disp_base_channel_dma_v0 {
	__u8 version;
	__u8 pad01[2];
	__u8 head;
	__u32 pushbuf;
};

/* overlay: per-head overlay plane channel. */
struct nv50_disp_overlay_channel_dma_v0 {
	__u8 version;
	__u8 pad01[2];
	__u8 head;
	__u32 pushbuf;
};

/* overlay immediate: per-head overlay immediate channel (no pushbuffer). */
struct nv50_disp_overlay_v0 {
	__u8 version;
	__u8 head;
	__u8 pad02[6];
};
511
512
513/*******************************************************************************
514 * fermi
515 ******************************************************************************/
516
517#define FERMI_A_ZBC_COLOR 0x00
518#define FERMI_A_ZBC_DEPTH 0x01
519
/* ZBC_COLOR: program a zero-bandwidth-clear color table entry.  Both the
 * ds (render target) and l2 (cache) encodings of the clear value must be
 * supplied for the chosen format.
 */
struct fermi_a_zbc_color_v0 {
	__u8  version;
/* Color formats the ZBC table entry may be programmed with. */
#define FERMI_A_ZBC_COLOR_V0_FMT_ZERO                                      0x01
#define FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE                                 0x02
#define FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32                       0x04
#define FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16                           0x08
#define FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16                       0x0c
#define FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16                       0x10
#define FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16                       0x14
#define FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16                       0x16
#define FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8                                  0x18
#define FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8                               0x1c
#define FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10                               0x20
#define FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10                           0x24
#define FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8                                  0x28
#define FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8                               0x2c
#define FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8                              0x30
#define FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8                              0x34
#define FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8                              0x38
#define FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10                               0x3c
#define FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11                              0x40
	__u8  format;
	__u8  index;		/* ZBC table slot to program */
	__u8  pad03[5];
	__u32 ds[4];		/* clear value, render-target encoding */
	__u32 l2[4];		/* clear value, L2 cache encoding */
};

/* ZBC_DEPTH: program a zero-bandwidth-clear depth table entry. */
struct fermi_a_zbc_depth_v0 {
	__u8  version;
#define FERMI_A_ZBC_DEPTH_V0_FMT_FP32                                      0x01
	__u8  format;
	__u8  index;		/* ZBC table slot to program */
	__u8  pad03[5];
	__u32 ds;		/* clear value, render-target encoding */
	__u32 l2;		/* clear value, L2 cache encoding */
};
557
558#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
new file mode 100644
index 000000000000..3c4df1fc26dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "client.h"
26#include "driver.h"
27#include "ioctl.h"
28
29int
30nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
31{
32 return client->driver->ioctl(client->base.priv, client->super, data, size, NULL);
33}
34
35int
36nvif_client_suspend(struct nvif_client *client)
37{
38 return client->driver->suspend(client->base.priv);
39}
40
41int
42nvif_client_resume(struct nvif_client *client)
43{
44 return client->driver->resume(client->base.priv);
45}
46
47void
48nvif_client_fini(struct nvif_client *client)
49{
50 if (client->driver) {
51 client->driver->fini(client->base.priv);
52 client->driver = NULL;
53 client->base.parent = NULL;
54 nvif_object_fini(&client->base);
55 }
56}
57
/* NULL-terminated table of driver backends probed by nvif_client_init(),
 * in preference order.  In-kernel builds talk to nvkm directly; userspace
 * builds try the drm ioctl backend, then the standalone library backend.
 */
const struct nvif_driver *
nvif_drivers[] = {
#ifdef __KERNEL__
	&nvif_driver_nvkm,
#else
	&nvif_driver_drm,
	&nvif_driver_lib,
#endif
	NULL
};
68
69int
70nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver,
71 const char *name, u64 device, const char *cfg, const char *dbg,
72 struct nvif_client *client)
73{
74 int ret, i;
75
76 ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base);
77 if (ret)
78 return ret;
79
80 client->base.parent = &client->base;
81 client->base.handle = ~0;
82 client->object = &client->base;
83 client->super = true;
84
85 for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
86 if (!driver || !strcmp(client->driver->name, driver)) {
87 ret = client->driver->init(name, device, cfg, dbg,
88 &client->base.priv);
89 if (!ret || driver)
90 break;
91 }
92 }
93
94 if (ret)
95 nvif_client_fini(client);
96 return ret;
97}
98
/* Destructor used by nvif_client_new(): finalise, then free the heap
 * allocation made there.
 */
static void
nvif_client_del(struct nvif_client *client)
{
	nvif_client_fini(client);
	kfree(client);
}
105
106int
107nvif_client_new(const char *driver, const char *name, u64 device,
108 const char *cfg, const char *dbg,
109 struct nvif_client **pclient)
110{
111 struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL);
112 if (client) {
113 int ret = nvif_client_init(nvif_client_del, driver, name,
114 device, cfg, dbg, client);
115 if (ret) {
116 kfree(client);
117 client = NULL;
118 }
119 *pclient = client;
120 return ret;
121 }
122 return -ENOMEM;
123}
124
125void
126nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient)
127{
128 nvif_object_ref(&client->base, (struct nvif_object **)pclient);
129}
diff --git a/drivers/gpu/drm/nouveau/nvif/client.h b/drivers/gpu/drm/nouveau/nvif/client.h
new file mode 100644
index 000000000000..28352f0882ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/client.h
@@ -0,0 +1,39 @@
1#ifndef __NVIF_CLIENT_H__
2#define __NVIF_CLIENT_H__
3
4#include "object.h"
5
/* A connection to a driver backend; root of an object tree. */
struct nvif_client {
	struct nvif_object base;	/* must be first: clients are objects */
	struct nvif_object *object;	/*XXX: hack for nvif_object() */
	const struct nvif_driver *driver;	/* bound backend, NULL when finalised */
	bool super;			/* privileged ioctl path when true */
};
12
13static inline struct nvif_client *
14nvif_client(struct nvif_object *object)
15{
16 while (object && object->parent != object)
17 object = object->parent;
18 return (void *)object;
19}
20
21int nvif_client_init(void (*dtor)(struct nvif_client *), const char *,
22 const char *, u64, const char *, const char *,
23 struct nvif_client *);
24void nvif_client_fini(struct nvif_client *);
25int nvif_client_new(const char *, const char *, u64, const char *,
26 const char *, struct nvif_client **);
27void nvif_client_ref(struct nvif_client *, struct nvif_client **);
28int nvif_client_ioctl(struct nvif_client *, void *, u32);
29int nvif_client_suspend(struct nvif_client *);
30int nvif_client_resume(struct nvif_client *);
31
/*XXX*/
#include <core/client.h>
/* Resolve any nvif handle to its backing in-kernel nouveau_client by
 * walking to the root object and dereferencing its private data.
 */
#define nvkm_client(a) ({ \
	struct nvif_client *_client = nvif_client(nvif_object(a)); \
	nouveau_client(_client->base.priv); \
})
38
39#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
new file mode 100644
index 000000000000..f477579725e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "device.h"
26
/* Tear down a device object; counterpart to nvif_device_init(). */
void
nvif_device_fini(struct nvif_device *device)
{
	nvif_object_fini(&device->base);
}
32
33int
34nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *),
35 u32 handle, u32 oclass, void *data, u32 size,
36 struct nvif_device *device)
37{
38 int ret = nvif_object_init(parent, (void *)dtor, handle, oclass,
39 data, size, &device->base);
40 if (ret == 0) {
41 device->object = &device->base;
42 device->info.version = 0;
43 ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO,
44 &device->info, sizeof(device->info));
45 }
46 return ret;
47}
48
/* Destructor used by nvif_device_new(): finalise, then free the heap
 * allocation made there.
 */
static void
nvif_device_del(struct nvif_device *device)
{
	nvif_device_fini(device);
	kfree(device);
}
55
56int
57nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass,
58 void *data, u32 size, struct nvif_device **pdevice)
59{
60 struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL);
61 if (device) {
62 int ret = nvif_device_init(parent, nvif_device_del, handle,
63 oclass, data, size, device);
64 if (ret) {
65 kfree(device);
66 device = NULL;
67 }
68 *pdevice = device;
69 return ret;
70 }
71 return -ENOMEM;
72}
73
74void
75nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice)
76{
77 nvif_object_ref(&device->base, (struct nvif_object **)pdevice);
78}
diff --git a/drivers/gpu/drm/nouveau/nvif/device.h b/drivers/gpu/drm/nouveau/nvif/device.h
new file mode 100644
index 000000000000..43180f9fe630
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/device.h
@@ -0,0 +1,62 @@
1#ifndef __NVIF_DEVICE_H__
2#define __NVIF_DEVICE_H__
3
4#include "object.h"
5#include "class.h"
6
/* Handle to a GPU device object, with its static info cached at init. */
struct nvif_device {
	struct nvif_object base;	/* must be first: devices are objects */
	struct nvif_object *object;	/*XXX: hack for nvif_object() */
	struct nv_device_info_v0 info;	/* filled by NV_DEVICE_V0_INFO at init */
};
12
13static inline struct nvif_device *
14nvif_device(struct nvif_object *object)
15{
16 while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
17 object = object->parent;
18 return (void *)object;
19}
20
21int nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
22 u32 handle, u32 oclass, void *, u32,
23 struct nvif_device *);
24void nvif_device_fini(struct nvif_device *);
25int nvif_device_new(struct nvif_object *, u32 handle, u32 oclass,
26 void *, u32, struct nvif_device **);
27void nvif_device_ref(struct nvif_device *, struct nvif_device **);
28
29/*XXX*/
30#include <subdev/bios.h>
31#include <subdev/fb.h>
32#include <subdev/vm.h>
33#include <subdev/bar.h>
34#include <subdev/gpio.h>
35#include <subdev/clock.h>
36#include <subdev/i2c.h>
37#include <subdev/timer.h>
38#include <subdev/therm.h>
39
40#define nvkm_device(a) nv_device(nvkm_object((a)))
41#define nvkm_bios(a) nouveau_bios(nvkm_device(a))
42#define nvkm_fb(a) nouveau_fb(nvkm_device(a))
43#define nvkm_vmmgr(a) nouveau_vmmgr(nvkm_device(a))
44#define nvkm_bar(a) nouveau_bar(nvkm_device(a))
45#define nvkm_gpio(a) nouveau_gpio(nvkm_device(a))
46#define nvkm_clock(a) nouveau_clock(nvkm_device(a))
47#define nvkm_i2c(a) nouveau_i2c(nvkm_device(a))
48#define nvkm_timer(a) nouveau_timer(nvkm_device(a))
49#define nvkm_wait(a,b,c,d) nv_wait(nvkm_timer(a), (b), (c), (d))
50#define nvkm_wait_cb(a,b,c) nv_wait_cb(nvkm_timer(a), (b), (c))
51#define nvkm_therm(a) nouveau_therm(nvkm_device(a))
52
53#include <engine/device.h>
54#include <engine/fifo.h>
55#include <engine/graph.h>
56#include <engine/software.h>
57
58#define nvkm_fifo(a) nouveau_fifo(nvkm_device(a))
59#define nvkm_fifo_chan(a) ((struct nouveau_fifo_chan *)nvkm_object(a))
60#define nvkm_gr(a) ((struct nouveau_graph *)nouveau_engine(nvkm_object(a), NVDEV_ENGINE_GR))
61
62#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/nvif/driver.h
new file mode 100644
index 000000000000..b72a8f0c2758
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/driver.h
@@ -0,0 +1,21 @@
1#ifndef __NVIF_DRIVER_H__
2#define __NVIF_DRIVER_H__
3
/* Backend operations the nvif library drives a client through.  Each
 * backend (in-kernel nvkm, userspace drm ioctl, standalone lib) provides
 * one of these; @priv is the backend's opaque per-client state.
 */
struct nvif_driver {
	const char *name;	/* matched against nvif_client_init()'s driver arg */
	int (*init)(const char *name, u64 device, const char *cfg,
		    const char *dbg, void **priv);
	void (*fini)(void *priv);
	int (*suspend)(void *priv);
	int (*resume)(void *priv);
	int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
	void *(*map)(void *priv, u64 handle, u32 size);	/* map channel ctrl regs */
	void (*unmap)(void *priv, void *ptr, u32 size);
	bool keep;	/* NOTE(review): presumably "keep backend alive across fini" - confirm */
};
16
17extern const struct nvif_driver nvif_driver_nvkm;
18extern const struct nvif_driver nvif_driver_drm;
19extern const struct nvif_driver nvif_driver_lib;
20
21#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/event.h b/drivers/gpu/drm/nouveau/nvif/event.h
new file mode 100644
index 000000000000..21764499b4be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/event.h
@@ -0,0 +1,62 @@
1#ifndef __NVIF_EVENT_H__
2#define __NVIF_EVENT_H__
3
/* Generic notifier request header: how large a reply to deliver, how to
 * route it, and a caller-chosen token echoed back in replies.  The
 * event-specific request (below) follows in data[].
 */
struct nvif_notify_req_v0 {
	__u8 version;
	__u8 reply;		/* size of reply payload requested */
	__u8 pad02[5];
#define NVIF_NOTIFY_V0_ROUTE_NVIF                                          0x00
	__u8 route;
	__u64 token; /* must be unique */
	__u8 data[]; /* request data (below) */
};

/* Generic notifier reply header; event-specific reply follows in data[]. */
struct nvif_notify_rep_v0 {
	__u8 version;
	__u8 pad01[6];
	__u8 route;		/* routing value from the request */
	__u64 token;		/* token from the request */
	__u8 data[]; /* reply data (below) */
};

/* Request payload for per-head (vblank) notifiers. */
struct nvif_notify_head_req_v0 {
	/* nvif_notify_req ... */
	__u8 version;
	__u8 head;		/* CRTC/head index to watch */
	__u8 pad02[6];
};

/* Reply payload for per-head notifiers (no data beyond the header). */
struct nvif_notify_head_rep_v0 {
	/* nvif_notify_rep ... */
	__u8 version;
	__u8 pad01[7];
};

/* Request payload for connector notifiers: which connector and which
 * events (bitmask) to report.
 */
struct nvif_notify_conn_req_v0 {
	/* nvif_notify_req ... */
	__u8 version;
#define NVIF_NOTIFY_CONN_V0_PLUG                                           0x01
#define NVIF_NOTIFY_CONN_V0_UNPLUG                                         0x02
#define NVIF_NOTIFY_CONN_V0_IRQ                                            0x04
#define NVIF_NOTIFY_CONN_V0_ANY                                            0x07
	__u8 mask;		/* events to listen for (bits above) */
	__u8 conn;		/* connector index */
	__u8 pad03[5];
};

/* Reply payload for connector notifiers: the event(s) that fired. */
struct nvif_notify_conn_rep_v0 {
	/* nvif_notify_rep ... */
	__u8 version;
	__u8 mask;		/* events that occurred (bits above) */
	__u8 pad02[6];
};

/* Non-stalling interrupt (uevent) notifier: no request payload. */
struct nvif_notify_uevent_req {
	/* nvif_notify_req ... */
};

/* Non-stalling interrupt (uevent) notifier: no reply payload. */
struct nvif_notify_uevent_rep {
	/* nvif_notify_rep ... */
};
61
62#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/ioctl.h b/drivers/gpu/drm/nouveau/nvif/ioctl.h
new file mode 100644
index 000000000000..4cd8e323b23d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/ioctl.h
@@ -0,0 +1,128 @@
1#ifndef __NVIF_IOCTL_H__
2#define __NVIF_IOCTL_H__
3
/* Header for every nvif ioctl: identifies the target object (by handle
 * path), the operation type, and routing/ownership information.  The
 * type-specific argument structure (below) follows in data[].
 */
struct nvif_ioctl_v0 {
	__u8 version;
#define NVIF_IOCTL_V0_OWNER_NVIF                                           0x00
#define NVIF_IOCTL_V0_OWNER_ANY                                            0xff
	__u8 owner;
/* Operation selector. */
#define NVIF_IOCTL_V0_NOP                                                  0x00
#define NVIF_IOCTL_V0_SCLASS                                               0x01
#define NVIF_IOCTL_V0_NEW                                                  0x02
#define NVIF_IOCTL_V0_DEL                                                  0x03
#define NVIF_IOCTL_V0_MTHD                                                 0x04
#define NVIF_IOCTL_V0_RD                                                   0x05
#define NVIF_IOCTL_V0_WR                                                   0x06
#define NVIF_IOCTL_V0_MAP                                                  0x07
#define NVIF_IOCTL_V0_UNMAP                                                0x08
#define NVIF_IOCTL_V0_NTFY_NEW                                             0x09
#define NVIF_IOCTL_V0_NTFY_DEL                                             0x0a
#define NVIF_IOCTL_V0_NTFY_GET                                             0x0b
#define NVIF_IOCTL_V0_NTFY_PUT                                             0x0c
	__u8 type;
	__u8 path_nr;		/* number of valid entries in path[] */
#define NVIF_IOCTL_V0_ROUTE_NVIF                                           0x00
#define NVIF_IOCTL_V0_ROUTE_HIDDEN                                         0xff
	__u8 pad04[3];
	__u8 route;
	__u64 token;
	__u32 path[8]; /* in reverse */
	__u8 data[]; /* ioctl data (below) */
};
32
/* NOP: no arguments; used to validate the ioctl path. */
struct nvif_ioctl_nop {
};

/* SCLASS: enumerate the classes a child object may be created with. */
struct nvif_ioctl_sclass_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 count;		/* in: array capacity, out: entries returned */
	__u8 pad02[6];
	__u32 oclass[];		/* out: supported class numbers */
};

/* NEW: create a child object of the addressed object. */
struct nvif_ioctl_new_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 pad01[6];
	__u8 route;		/* routing for events raised by the object */
	__u64 token;
	__u32 handle;		/* caller-chosen handle for the new object */
/* these class numbers are made up by us, and not nvidia-assigned */
#define NVIF_IOCTL_NEW_V0_PERFCTR                                    0x0000ffff
#define NVIF_IOCTL_NEW_V0_CONTROL                                    0x0000fffe
	__u32 oclass;
	__u8 data[]; /* class data (class.h) */
};

/* DEL: destroy the addressed object; no arguments. */
struct nvif_ioctl_del {
};

/* RD: read a register/value of the given byte size from the object. */
struct nvif_ioctl_rd_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 size;		/* in: access size, bytes */
	__u8 pad02[2];
	__u32 data;		/* out: value read */
	__u64 addr;		/* in: offset to read */
};

/* WR: write a register/value of the given byte size to the object. */
struct nvif_ioctl_wr_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 size;		/* in: access size, bytes */
	__u8 pad02[2];
	__u32 data;		/* in: value to write */
	__u64 addr;		/* in: offset to write */
};

/* MAP: request a mapping of the object's control area. */
struct nvif_ioctl_map_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 pad01[3];
	__u32 length;		/* out: size of the mapping */
	__u64 handle;		/* out: token to hand to the map() backend op */
};

/* UNMAP: release a mapping; no arguments. */
struct nvif_ioctl_unmap {
};

/* NTFY_NEW: create a notifier on the object for the given event/index;
 * the event-specific request (event.h) follows in data[].
 */
struct nvif_ioctl_ntfy_new_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 event;		/* in: event type */
	__u8 index;		/* out: notifier index for later get/put/del */
	__u8 pad03[5];
	__u8 data[]; /* event request data (event.h) */
};

/* NTFY_DEL: destroy a previously created notifier. */
struct nvif_ioctl_ntfy_del_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 index;
	__u8 pad02[6];
};

/* NTFY_GET: arm a notifier so it may deliver events. */
struct nvif_ioctl_ntfy_get_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 index;
	__u8 pad02[6];
};

/* NTFY_PUT: disarm a notifier. */
struct nvif_ioctl_ntfy_put_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 index;
	__u8 pad02[6];
};

/* MTHD: invoke a class method on the object; method arguments (class.h)
 * follow in data[].
 */
struct nvif_ioctl_mthd_v0 {
	/* nvif_ioctl ... */
	__u8 version;
	__u8 method;
	__u8 pad02[6];
	__u8 data[]; /* method data (class.h) */
};
127
128#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/list.h b/drivers/gpu/drm/nouveau/nvif/list.h
new file mode 100644
index 000000000000..8af5d144ecb0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/list.h
@@ -0,0 +1,353 @@
1/*
2 * Copyright © 2010 Intel Corporation
3 * Copyright © 2010 Francisco Jerez <currojerez@riseup.net>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 */
25
26/* Modified by Ben Skeggs <bskeggs@redhat.com> to match kernel list APIs */
27
28#ifndef _XORG_LIST_H_
29#define _XORG_LIST_H_
30
31/**
32 * @file Classic doubly-link circular list implementation.
33 * For real usage examples of the linked list, see the file test/list.c
34 *
35 * Example:
36 * We need to keep a list of struct foo in the parent struct bar, i.e. what
37 * we want is something like this.
38 *
39 * struct bar {
40 * ...
41 * struct foo *list_of_foos; -----> struct foo {}, struct foo {}, struct foo{}
42 * ...
43 * }
44 *
45 * We need one list head in bar and a list element in all list_of_foos (both are of
46 * data type 'struct list_head').
47 *
48 * struct bar {
49 * ...
50 * struct list_head list_of_foos;
51 * ...
52 * }
53 *
54 * struct foo {
55 * ...
56 * struct list_head entry;
57 * ...
58 * }
59 *
60 * Now we initialize the list head:
61 *
62 * struct bar bar;
63 * ...
64 * INIT_LIST_HEAD(&bar.list_of_foos);
65 *
66 * Then we create the first element and add it to this list:
67 *
68 * struct foo *foo = malloc(...);
69 * ....
70 * list_add(&foo->entry, &bar.list_of_foos);
71 *
72 * Repeat the above for each element you want to add to the list. Deleting
73 * works with the element itself.
74 * list_del(&foo->entry);
75 * free(foo);
76 *
77 * Note: calling list_del(&bar.list_of_foos) will set bar.list_of_foos to an empty
78 * list again.
79 *
80 * Looping through the list requires a 'struct foo' as iterator and the
81 * name of the field the subnodes use.
82 *
83 * struct foo *iterator;
84 * list_for_each_entry(iterator, &bar.list_of_foos, entry) {
85 * if (iterator->something == ...)
86 * ...
87 * }
88 *
89 * Note: You must not call list_del() on the iterator if you continue the
90 * loop. You need to run the safe for-each loop instead:
91 *
92 * struct foo *iterator, *next;
93 * list_for_each_entry_safe(iterator, next, &bar.list_of_foos, entry) {
94 * if (...)
95 * list_del(&iterator->entry);
96 * }
97 *
98 */
99
/**
 * Linkage structure for list nodes.  Embed one of these in any struct that
 * should be linkable, and use another one (standalone or embedded) as the
 * list head.  Heads and nodes are structurally identical, so any
 * struct list_head may serve as a head, the member's position within its
 * container is irrelevant, and the elements of one list need not share a
 * type.
 */
struct list_head {
	struct list_head *next, *prev;
};

/**
 * Static initializer for an empty list head, plus a convenience declarator
 * that defines and initializes one in a single statement.
 *
 * Example:
 *     static LIST_HEAD(my_list);
 */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
    struct list_head name = LIST_HEAD_INIT(name)

/**
 * Initialize @list as an empty list: both links point back at the head
 * itself.
 */
static inline void
INIT_LIST_HEAD(struct list_head *list)
{
	list->prev = list;
	list->next = list;
}

/*
 * Splice @entry between the two adjacent nodes @prev and @next.  Core
 * helper shared by list_add() and list_add_tail().
 */
static inline void
__list_add(struct list_head *entry,
	   struct list_head *prev, struct list_head *next)
{
	entry->prev = prev;
	entry->next = next;
	prev->next = entry;
	next->prev = entry;
}

/**
 * Insert @entry immediately after @head; @entry needs no prior
 * initialization.  The list changes from
 *     head -> old first -> ...
 * to
 *     head -> entry -> old first -> ...
 *
 * @param entry The new element to prepend to the list.
 * @param head The existing list.
 */
static inline void
list_add(struct list_head *entry, struct list_head *head)
{
	__list_add(entry, head, head->next);
}

/**
 * Append @entry at the tail of the list rooted at @head, i.e. insert it
 * just before @head in the circular chain.
 *
 * @param entry The new element to append to the list.
 * @param head The existing list.
 */
static inline void
list_add_tail(struct list_head *entry, struct list_head *head)
{
	__list_add(entry, head->prev, head);
}

/*
 * Unlink whatever sits between @prev and @next by joining the two
 * directly.
 */
static inline void
__list_del(struct list_head *prev, struct list_head *next)
{
	prev->next = next;
	next->prev = prev;
}

/**
 * Remove @entry from whatever list it is on.  The element itself is not
 * freed or otherwise modified, and its own link pointers are left stale;
 * use list_del_init() instead if the node may be examined again later.
 *
 * Calling this on a bare list head (rather than a member node) does not
 * remove the first element - it resets that head to the empty list.
 *
 * @param entry The element to remove.
 */
static inline void
list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
}

/**
 * Remove @entry from its list and reinitialize it as an empty list so it
 * can be safely tested with list_empty() or reused afterwards.
 */
static inline void
list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * Detach @list from its current position and append it at the tail of the
 * list rooted at @head.
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}

/**
 * Check whether the list rooted at @head holds no elements, i.e. the head
 * links only to itself.
 *
 * @return True if the list is empty, false otherwise.
 */
static inline bool
list_empty(struct list_head *head)
{
	return head->next == head;
}
239
/**
 * Returns a pointer to the container of this list element.
 *
 * Example:
 * struct foo* f;
 * f = container_of(&foo->entry, struct foo, entry);
 * assert(f == foo);
 *
 * @param ptr Pointer to the struct list_head.
 * @param type Data type of the list element.
 * @param member Member name of the struct list_head field in the list element.
 * @return A pointer to the data struct containing the list head.
 */
/* NOTE(review): the null-pointer member-offset trick below is the
 * traditional hand-rolled offsetof(); technically UB per ISO C but
 * universally supported — kept for compatibility with compilers that
 * predate a usable <stddef.h> offsetof here.
 */
#ifndef container_of
#define container_of(ptr, type, member) \
    (type *)((char *)(ptr) - (char *) &((type *)0)->member)
#endif
257
/**
 * Alias of container_of
 */
#define list_entry(ptr, type, member) \
    container_of(ptr, type, member)

/**
 * Retrieve the first list entry for the given list pointer.
 *
 * Example:
 * struct foo *first;
 * first = list_first_entry(&bar->list_of_foos, struct foo, list_of_foos);
 *
 * @param ptr The list head
 * @param type Data type of the list element to retrieve
 * @param member Member name of the struct list_head field in the list element.
 * @return A pointer to the first list element.
 */
#define list_first_entry(ptr, type, member) \
    list_entry((ptr)->next, type, member)

/**
 * Retrieve the last list entry for the given list pointer.
 *
 * Example:
 * struct foo *last;
 * last = list_last_entry(&bar->list_of_foos, struct foo, list_of_foos);
 *
 * @param ptr The list head
 * @param type Data type of the list element to retrieve
 * @param member Member name of the struct list_head field in the list element.
 * @return A pointer to the last list element.
 */
#define list_last_entry(ptr, type, member) \
    list_entry((ptr)->prev, type, member)
293
/* Internal variant of container_of() that infers the container type from
 * a sample pointer (GNU typeof extension) instead of an explicit type.
 */
#define __container_of(ptr, sample, member) \
    (void *)container_of((ptr), typeof(*(sample)), member)

/**
 * Loop through the list given by head and set pos to struct in the list.
 *
 * Example:
 * struct foo *iterator;
 * list_for_each_entry(iterator, &bar->list_of_foos, entry) {
 *      [modify iterator]
 * }
 *
 * This macro is not safe for node deletion. Use list_for_each_entry_safe
 * instead.
 *
 * @param pos Iterator variable of the type of the list elements.
 * @param head List head
 * @param member Member name of the struct list_head in the list elements.
 *
 */
#define list_for_each_entry(pos, head, member) \
    for (pos = __container_of((head)->next, pos, member); \
	 &pos->member != (head); \
	 pos = __container_of(pos->member.next, pos, member))

/**
 * Loop through the list, keeping a backup pointer to the element. This
 * macro allows for the deletion of a list element while looping through the
 * list.
 *
 * See list_for_each_entry for more details.
 */
#define list_for_each_entry_safe(pos, tmp, head, member) \
    for (pos = __container_of((head)->next, pos, member), \
	 tmp = __container_of(pos->member.next, pos, member); \
	 &pos->member != (head); \
	 pos = tmp, tmp = __container_of(pos->member.next, tmp, member))
331
332
/* Iterate the list from the last element towards the first.
 * Not safe for node deletion.
 */
#define list_for_each_entry_reverse(pos, head, member) \
    for (pos = __container_of((head)->prev, pos, member); \
	 &pos->member != (head); \
	 pos = __container_of(pos->member.prev, pos, member))

/* Continue iterating forwards from the element AFTER the current 'pos'. */
#define list_for_each_entry_continue(pos, head, member) \
    for (pos = __container_of(pos->member.next, pos, member); \
	 &pos->member != (head); \
	 pos = __container_of(pos->member.next, pos, member))

/* Continue iterating backwards from the element BEFORE the current 'pos'. */
#define list_for_each_entry_continue_reverse(pos, head, member) \
    for (pos = __container_of(pos->member.prev, pos, member); \
	 &pos->member != (head); \
	 pos = __container_of(pos->member.prev, pos, member))

/* Iterate forwards starting AT the current 'pos' (inclusive). */
#define list_for_each_entry_from(pos, head, member) \
    for (; \
	 &pos->member != (head); \
	 pos = __container_of(pos->member.next, pos, member))
352
353#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
new file mode 100644
index 000000000000..7c06123a559c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -0,0 +1,237 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <nvif/client.h>
26#include <nvif/driver.h>
27#include <nvif/notify.h>
28#include <nvif/object.h>
29#include <nvif/ioctl.h>
30#include <nvif/event.h>
31
32static inline int
33nvif_notify_put_(struct nvif_notify *notify)
34{
35 struct nvif_object *object = notify->object;
36 struct {
37 struct nvif_ioctl_v0 ioctl;
38 struct nvif_ioctl_ntfy_put_v0 ntfy;
39 } args = {
40 .ioctl.type = NVIF_IOCTL_V0_NTFY_PUT,
41 .ntfy.index = notify->index,
42 };
43
44 if (atomic_inc_return(&notify->putcnt) != 1)
45 return 0;
46
47 return nvif_object_ioctl(object, &args, sizeof(args), NULL);
48}
49
50int
51nvif_notify_put(struct nvif_notify *notify)
52{
53 if (likely(notify->object) &&
54 test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
55 int ret = nvif_notify_put_(notify);
56 if (test_bit(NVIF_NOTIFY_WORK, &notify->flags))
57 flush_work(&notify->work);
58 return ret;
59 }
60 return 0;
61}
62
63static inline int
64nvif_notify_get_(struct nvif_notify *notify)
65{
66 struct nvif_object *object = notify->object;
67 struct {
68 struct nvif_ioctl_v0 ioctl;
69 struct nvif_ioctl_ntfy_get_v0 ntfy;
70 } args = {
71 .ioctl.type = NVIF_IOCTL_V0_NTFY_GET,
72 .ntfy.index = notify->index,
73 };
74
75 if (atomic_dec_return(&notify->putcnt) != 0)
76 return 0;
77
78 return nvif_object_ioctl(object, &args, sizeof(args), NULL);
79}
80
81int
82nvif_notify_get(struct nvif_notify *notify)
83{
84 if (likely(notify->object) &&
85 !test_and_set_bit(NVIF_NOTIFY_USER, &notify->flags))
86 return nvif_notify_get_(notify);
87 return 0;
88}
89
90static void
91nvif_notify_work(struct work_struct *work)
92{
93 struct nvif_notify *notify = container_of(work, typeof(*notify), work);
94 if (notify->func(notify) == NVIF_NOTIFY_KEEP)
95 nvif_notify_get_(notify);
96}
97
/* Dispatch an event report from the kernel to the nvif_notify it targets.
 *
 * 'header'/'length' carry the nvif_notify_rep header whose token is the
 * nvif_notify pointer stashed at init time; 'data'/'size' carry the
 * reply payload.  Returns NVIF_NOTIFY_DROP or NVIF_NOTIFY_KEEP to tell
 * the caller whether the notifier should stay armed.
 */
int
nvif_notify(const void *header, u32 length, const void *data, u32 size)
{
	struct nvif_notify *notify = NULL;
	const union {
		struct nvif_notify_rep_v0 v0;
	} *args = header;
	int ret = NVIF_NOTIFY_DROP;

	if (length == sizeof(args->v0) && args->v0.version == 0) {
		/* route != 0 would mean the report wasn't meant for nvif */
		if (WARN_ON(args->v0.route))
			return NVIF_NOTIFY_DROP;
		/* token round-trips the nvif_notify pointer (see _init) */
		notify = (void *)(unsigned long)args->v0.token;
	}

	if (!WARN_ON(notify == NULL)) {
		struct nvif_client *client = nvif_client(notify->object);
		if (!WARN_ON(notify->size != size)) {
			if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
				/* take a put to keep the notifier quiet
				 * until the work item has run; the data
				 * must be copied, the source may be on
				 * an irq handler's stack */
				atomic_inc(&notify->putcnt);
				memcpy((void *)notify->data, data, size);
				schedule_work(&notify->work);
				return NVIF_NOTIFY_DROP;
			}
			/* direct delivery: expose the payload only for
			 * the duration of the callback */
			notify->data = data;
			ret = notify->func(notify);
			notify->data = NULL;
			if (ret != NVIF_NOTIFY_DROP && client->driver->keep) {
				/* driver re-arms on our behalf; balance
				 * the counter accordingly */
				atomic_inc(&notify->putcnt);
				nvif_notify_get_(notify);
			}
		}
	}

	return ret;
}
134
/* Tear down a notifier: disable it, destroy the kernel-side object, and
 * release our resources.  Safe to call on a notify whose init failed
 * part-way (object may already be NULL).  Does NOT free 'notify' itself;
 * that is the dtor's job (see nvif_notify_del).
 */
int
nvif_notify_fini(struct nvif_notify *notify)
{
	struct nvif_object *object = notify->object;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_ntfy_del_v0 ntfy;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_NTFY_DEL,
		.ntfy.index = notify->index,
	};
	/* disable first (also flushes pending work) before deleting */
	int ret = nvif_notify_put(notify);
	if (ret >= 0 && object) {
		ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
		if (ret == 0) {
			nvif_object_ref(NULL, &notify->object);
			/* frees the work-mode reply buffer; const cast is
			 * the documented exception (see notify.h) */
			kfree((void *)notify->data);
		}
	}
	return ret;
}
156
157int
158nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
159 int (*func)(struct nvif_notify *), bool work, u8 event,
160 void *data, u32 size, u32 reply, struct nvif_notify *notify)
161{
162 struct {
163 struct nvif_ioctl_v0 ioctl;
164 struct nvif_ioctl_ntfy_new_v0 ntfy;
165 struct nvif_notify_req_v0 req;
166 } *args;
167 int ret = -ENOMEM;
168
169 notify->object = NULL;
170 nvif_object_ref(object, &notify->object);
171 notify->flags = 0;
172 atomic_set(&notify->putcnt, 1);
173 notify->dtor = dtor;
174 notify->func = func;
175 notify->data = NULL;
176 notify->size = reply;
177 if (work) {
178 INIT_WORK(&notify->work, nvif_notify_work);
179 set_bit(NVIF_NOTIFY_WORK, &notify->flags);
180 notify->data = kmalloc(notify->size, GFP_KERNEL);
181 if (!notify->data)
182 goto done;
183 }
184
185 if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
186 goto done;
187 args->ioctl.version = 0;
188 args->ioctl.type = NVIF_IOCTL_V0_NTFY_NEW;
189 args->ntfy.version = 0;
190 args->ntfy.event = event;
191 args->req.version = 0;
192 args->req.reply = notify->size;
193 args->req.route = 0;
194 args->req.token = (unsigned long)(void *)notify;
195
196 memcpy(args->req.data, data, size);
197 ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
198 notify->index = args->ntfy.index;
199 kfree(args);
200done:
201 if (ret)
202 nvif_notify_fini(notify);
203 return ret;
204}
205
/* Destructor used for heap-allocated notifiers (nvif_notify_new). */
static void
nvif_notify_del(struct nvif_notify *notify)
{
	nvif_notify_fini(notify);
	kfree(notify);
}
212
/* Release the reference held in *pnotify by running its dtor.
 *
 * NOTE(review): nvif_notify is not refcounted, so only dropping a
 * reference (notify == NULL) is supported — hence the BUG_ON guarding
 * against anyone trying to take one.
 */
void
nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
{
	BUG_ON(notify != NULL);
	if (*pnotify)
		(*pnotify)->dtor(*pnotify);
	*pnotify = notify;
}
221
222int
223nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *),
224 bool work, u8 type, void *data, u32 size, u32 reply,
225 struct nvif_notify **pnotify)
226{
227 struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL);
228 if (notify) {
229 int ret = nvif_notify_init(object, nvif_notify_del, func, work,
230 type, data, size, reply, notify);
231 if (ret)
232 kfree(notify);
233 *pnotify = notify;
234 return ret;
235 }
236 return -ENOMEM;
237}
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.h b/drivers/gpu/drm/nouveau/nvif/notify.h
new file mode 100644
index 000000000000..9ebfa3b45e76
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/notify.h
@@ -0,0 +1,39 @@
#ifndef __NVIF_NOTIFY_H__
#define __NVIF_NOTIFY_H__

/* Client-side notifier: wraps a kernel event source on an nvif_object and
 * delivers reports either directly or via a workqueue (NVIF_NOTIFY_WORK).
 */
struct nvif_notify {
	struct nvif_object *object;	/* object the notifier was created on */
	int index;			/* kernel-assigned notifier index */

#define NVIF_NOTIFY_USER 0		/* flag: enabled by the user */
#define NVIF_NOTIFY_WORK 1		/* flag: deliver via workqueue */
	unsigned long flags;
	atomic_t putcnt;		/* outstanding disables; 0 == armed */
	void (*dtor)(struct nvif_notify *);
#define NVIF_NOTIFY_DROP 0		/* func: do not re-arm */
#define NVIF_NOTIFY_KEEP 1		/* func: keep the notifier armed */
	int (*func)(struct nvif_notify *);

	/* this is const for a *very* good reason - the data might be on the
	 * stack from an irq handler.  if you're not nvif/notify.c then you
	 * should probably think twice before casting it away...
	 */
	const void *data;		/* report payload, valid during func() */
	u32 size;			/* expected report payload size */
	struct work_struct work;	/* used when NVIF_NOTIFY_WORK is set */
};

int nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *),
		     int (*func)(struct nvif_notify *), bool work, u8 type,
		     void *data, u32 size, u32 reply, struct nvif_notify *);
int nvif_notify_fini(struct nvif_notify *);
int nvif_notify_get(struct nvif_notify *);
int nvif_notify_put(struct nvif_notify *);
int nvif_notify(const void *, u32, const void *, u32);

int nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *),
		    bool work, u8 type, void *data, u32 size, u32 reply,
		    struct nvif_notify **);
void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **);

#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
new file mode 100644
index 000000000000..b0c82206ece2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -0,0 +1,302 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "object.h"
26#include "client.h"
27#include "driver.h"
28#include "ioctl.h"
29
/* Forward an ioctl to the driver backend, first filling in the object
 * path (child-to-root handle chain) in the version-0 header.  The root
 * object is detected by parent == self.
 *
 * 'hack' is an opaque out-pointer some backends use to return the
 * in-kernel object (see nvkm_object() in object.h).
 */
int
nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
{
	struct nvif_client *client = nvif_client(object);
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;

	if (size >= sizeof(*args) && args->v0.version == 0) {
		args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
		args->v0.path_nr = 0;
		while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) {
			args->v0.path[args->v0.path_nr++] = object->handle;
			if (object->parent == object)
				break;	/* reached the root (client) */
			object = object->parent;
		}
	} else
		return -ENOSYS;	/* only v0 headers are understood */

	return client->driver->ioctl(client->base.priv, client->super, data, size, hack);
}
52
/* Query the classes supported by 'object'.  Up to 'count' class ids are
 * copied into 'oclass'; returns the number available (which may exceed
 * 'count') or a negative error code.
 *
 * NOTE(review): 'count' is caller-controlled and the allocation size is
 * not checked for overflow — presumably callers pass small values;
 * verify at call sites.  The copy-back also happens on ioctl failure,
 * which is harmless since the buffer sizes match.
 */
int
nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_sclass_v0 sclass;
	} *args;
	u32 size = count * sizeof(args->sclass.oclass[0]);
	int ret;

	if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
		return -ENOMEM;
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
	args->sclass.version = 0;
	args->sclass.count = count;

	memcpy(args->sclass.oclass, oclass, size);
	ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
	ret = ret ? ret : args->sclass.count;
	memcpy(oclass, args->sclass.oclass, size);
	kfree(args);
	return ret;
}
77
78u32
79nvif_object_rd(struct nvif_object *object, int size, u64 addr)
80{
81 struct {
82 struct nvif_ioctl_v0 ioctl;
83 struct nvif_ioctl_rd_v0 rd;
84 } args = {
85 .ioctl.type = NVIF_IOCTL_V0_RD,
86 .rd.size = size,
87 .rd.addr = addr,
88 };
89 int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
90 if (ret) {
91 /*XXX: warn? */
92 return 0;
93 }
94 return args.rd.data;
95}
96
97void
98nvif_object_wr(struct nvif_object *object, int size, u64 addr, u32 data)
99{
100 struct {
101 struct nvif_ioctl_v0 ioctl;
102 struct nvif_ioctl_wr_v0 wr;
103 } args = {
104 .ioctl.type = NVIF_IOCTL_V0_WR,
105 .wr.size = size,
106 .wr.addr = addr,
107 .wr.data = data,
108 };
109 int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
110 if (ret) {
111 /*XXX: warn? */
112 }
113}
114
115int
116nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
117{
118 struct {
119 struct nvif_ioctl_v0 ioctl;
120 struct nvif_ioctl_mthd_v0 mthd;
121 } *args;
122 u8 stack[128];
123 int ret;
124
125 if (sizeof(*args) + size > sizeof(stack)) {
126 if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
127 return -ENOMEM;
128 } else {
129 args = (void *)stack;
130 }
131 args->ioctl.version = 0;
132 args->ioctl.type = NVIF_IOCTL_V0_MTHD;
133 args->mthd.version = 0;
134 args->mthd.method = mthd;
135
136 memcpy(args->mthd.data, data, size);
137 ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
138 memcpy(data, args->mthd.data, size);
139 if (args != (void *)stack)
140 kfree(args);
141 return ret;
142}
143
144void
145nvif_object_unmap(struct nvif_object *object)
146{
147 if (object->map.size) {
148 struct nvif_client *client = nvif_client(object);
149 struct {
150 struct nvif_ioctl_v0 ioctl;
151 struct nvif_ioctl_unmap unmap;
152 } args = {
153 .ioctl.type = NVIF_IOCTL_V0_UNMAP,
154 };
155
156 if (object->map.ptr) {
157 client->driver->unmap(client, object->map.ptr,
158 object->map.size);
159 object->map.ptr = NULL;
160 }
161
162 nvif_object_ioctl(object, &args, sizeof(args), NULL);
163 object->map.size = 0;
164 }
165}
166
/* Create a cpu mapping of the object: ask the kernel for a map handle,
 * then have the driver backend turn it into a pointer.  On any failure
 * the partial state is unwound via nvif_object_unmap().
 */
int
nvif_object_map(struct nvif_object *object)
{
	struct nvif_client *client = nvif_client(object);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_map_v0 map;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_MAP,
	};
	int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
	if (ret == 0) {
		object->map.size = args.map.length;
		object->map.ptr = client->driver->map(client, args.map.handle,
						      object->map.size);
		/* comma operator: preload ret with -ENOMEM, then test the
		 * pointer — ret is only used if the map failed */
		if (ret = -ENOMEM, object->map.ptr)
			return 0;
		nvif_object_unmap(object);
	}
	return ret;
}
188
/* Constructor-argument envelope kept alive for the object's lifetime;
 * object->data points at ctor->new.data (see nvif_object_init/fini).
 */
struct ctor {
	struct nvif_ioctl_v0 ioctl;
	struct nvif_ioctl_new_v0 new;
};
193
/* Destroy the kernel-side object and release local state.  Safe on an
 * object whose init failed part-way (parent may already be NULL).
 */
void
nvif_object_fini(struct nvif_object *object)
{
	/* recover the ctor envelope from the embedded data pointer; only
	 * dereferenced/freed below when object->data is non-NULL */
	struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data);
	if (object->parent) {
		struct {
			struct nvif_ioctl_v0 ioctl;
			struct nvif_ioctl_del del;
		} args = {
			.ioctl.type = NVIF_IOCTL_V0_DEL,
		};

		nvif_object_unmap(object);
		nvif_object_ioctl(object, &args, sizeof(args), NULL);
		if (object->data) {
			object->size = 0;
			object->data = NULL;
			kfree(ctor);
		}
		nvif_object_ref(NULL, &object->parent);
	}
}
216
/* Initialise 'object' as a child of 'parent' with class 'oclass', sending
 * the class-specific constructor arguments 'data'/'size' to the kernel.
 *
 * A NULL parent is accepted and results in a successfully-initialised
 * local-only object with no kernel-side counterpart.  On failure the
 * object is cleaned up before returning.
 */
int
nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *),
		 u32 handle, u32 oclass, void *data, u32 size,
		 struct nvif_object *object)
{
	struct ctor *ctor;
	int ret = 0;

	object->parent = NULL;
	object->object = object;
	nvif_object_ref(parent, &object->parent);
	kref_init(&object->refcount);
	object->handle = handle;
	object->oclass = oclass;
	object->data = NULL;
	object->size = 0;
	object->dtor = dtor;
	object->map.ptr = NULL;
	object->map.size = 0;

	if (object->parent) {
		/* the ctor envelope outlives this call: object->data keeps
		 * pointing into it until nvif_object_fini() */
		if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) {
			nvif_object_fini(object);
			return -ENOMEM;
		}
		object->data = ctor->new.data;
		object->size = size;
		memcpy(object->data, data, size);

		ctor->ioctl.version = 0;
		ctor->ioctl.type = NVIF_IOCTL_V0_NEW;
		ctor->new.version = 0;
		ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
		/* token round-trips our pointer through the kernel */
		ctor->new.token = (unsigned long)(void *)object;
		ctor->new.handle = handle;
		ctor->new.oclass = oclass;

		ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) +
					object->size, &object->priv);
	}

	if (ret)
		nvif_object_fini(object);
	return ret;
}
262
/* Destructor used for heap-allocated objects (nvif_object_new). */
static void
nvif_object_del(struct nvif_object *object)
{
	nvif_object_fini(object);
	kfree(object);
}
269
270int
271nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass,
272 void *data, u32 size, struct nvif_object **pobject)
273{
274 struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL);
275 if (object) {
276 int ret = nvif_object_init(parent, nvif_object_del, handle,
277 oclass, data, size, object);
278 if (ret)
279 kfree(object);
280 *pobject = object;
281 return ret;
282 }
283 return -ENOMEM;
284}
285
286static void
287nvif_object_put(struct kref *kref)
288{
289 struct nvif_object *object =
290 container_of(kref, typeof(*object), refcount);
291 object->dtor(object);
292}
293
/* Move a reference: *pobject takes a ref on 'object' (which may be NULL)
 * and drops whatever it previously held.
 *
 * The get MUST happen before the put so the sequence is safe even when
 * 'object' and '*pobject' are the same object.
 */
void
nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject)
{
	if (object)
		kref_get(&object->refcount);
	if (*pobject)
		kref_put(&(*pobject)->refcount, nvif_object_put);
	*pobject = object;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.h b/drivers/gpu/drm/nouveau/nvif/object.h
new file mode 100644
index 000000000000..fac3a3bbec44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/object.h
@@ -0,0 +1,75 @@
#ifndef __NVIF_OBJECT_H__
#define __NVIF_OBJECT_H__

#include <nvif/os.h>

/* Client-side handle for a kernel object; children keep a reference on
 * their parent, forming the path sent with every ioctl.
 */
struct nvif_object {
	struct nvif_object *parent;	/* parent in the object tree */
	struct nvif_object *object; /*XXX: hack for nvif_object() */
	struct kref refcount;
	u32 handle;			/* handle within the parent */
	u32 oclass;			/* object class */
	void *data;			/* ctor argument payload (object.c) */
	u32 size;			/* size of 'data' */
	void *priv; /*XXX: hack */
	void (*dtor)(struct nvif_object *);
	struct {
		void *ptr;		/* cpu mapping, NULL when unmapped */
		u32 size;		/* mapping length, 0 when unmapped */
	} map;
};

int  nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *),
		      u32 handle, u32 oclass, void *, u32,
		      struct nvif_object *);
void nvif_object_fini(struct nvif_object *);
int  nvif_object_new(struct nvif_object *, u32 handle, u32 oclass,
		     void *, u32, struct nvif_object **);
void nvif_object_ref(struct nvif_object *, struct nvif_object **);
int  nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
int  nvif_object_sclass(struct nvif_object *, u32 *, int);
u32  nvif_object_rd(struct nvif_object *, int, u64);
void nvif_object_wr(struct nvif_object *, int, u64, u32);
int  nvif_object_mthd(struct nvif_object *, u32, void *, u32);
int  nvif_object_map(struct nvif_object *);
void nvif_object_unmap(struct nvif_object *);

#define nvif_object(a) (a)->object

#define ioread8_native ioread8
#define iowrite8_native iowrite8
/* register accessors: go through the cpu mapping when one exists,
 * otherwise fall back to the RD/WR ioctl path.  'b' is the access
 * width in bits. */
#define nvif_rd(a,b,c) ({                                                      \
	struct nvif_object *_object = nvif_object(a);                          \
	u32 _data;                                                             \
	if (likely(_object->map.ptr))                                          \
		_data = ioread##b##_native((u8 *)_object->map.ptr + (c));      \
	else                                                                   \
		_data = nvif_object_rd(_object, (b) / 8, (c));                 \
	_data;                                                                 \
})
#define nvif_wr(a,b,c,d) ({                                                    \
	struct nvif_object *_object = nvif_object(a);                          \
	if (likely(_object->map.ptr))                                          \
		iowrite##b##_native((d), (u8 *)_object->map.ptr + (c));        \
	else                                                                   \
		nvif_object_wr(_object, (b) / 8, (c), (d));                    \
})
#define nvif_rd08(a,b) ({ u8 _v = nvif_rd((a), 8, (b)); _v; })
#define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; })
#define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; })
#define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c))
#define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c))
#define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c))
/* read-modify-write: clears bits in 'c', sets bits in 'd', returns the
 * PREVIOUS register value */
#define nvif_mask(a,b,c,d) ({                                                  \
	u32 _v = nvif_rd32(nvif_object(a), (b));                               \
	nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d));                     \
	_v;                                                                    \
})

#define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d))

/*XXX*/
#include <core/object.h>
#define nvkm_object(a) ((struct nouveau_object *)nvif_object(a)->priv)

#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/os.h b/drivers/gpu/drm/nouveau/nvif/os.h
new file mode 120000
index 000000000000..bd744b2cf5cf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/os.h
@@ -0,0 +1 @@
../core/os.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/nvif/unpack.h b/drivers/gpu/drm/nouveau/nvif/unpack.h
new file mode 100644
index 000000000000..5933188b4a77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/unpack.h
@@ -0,0 +1,24 @@
#ifndef __NVIF_UNPACK_H__
#define __NVIF_UNPACK_H__

/* Accept an unversioned argument struct: succeeds (ret = 0) only when
 * the caller-supplied 'size' matches sizeof(d) exactly.  Evaluates to
 * true on success.  'ret' and 'size' must be in scope at the expansion
 * site (statement-expression idiom used by the ioctl handlers).
 */
#define nvif_unvers(d) ({                                                      \
	ret = (size == sizeof(d)) ? 0 : -ENOSYS;                               \
	(ret == 0);                                                            \
})

/* Try to unpack a versioned argument struct 'd' accepting versions
 * [vl, vh].  On a match, 'data'/'size' are advanced past the struct;
 * 'm' (more) permits trailing bytes, otherwise leftovers yield -E2BIG.
 * Chained attempts are gated on ret == -ENOSYS from the previous try
 * (the vl == 0 case runs first unconditionally).  Evaluates to true on
 * success; 'ret', 'data' and 'size' must be in scope.
 */
#define nvif_unpack(d,vl,vh,m) ({                                              \
	if ((vl) == 0 || ret == -ENOSYS) {                                     \
		int _size = sizeof(d);                                         \
		if (_size <= size && (d).version >= (vl) &&                    \
		    (d).version <= (vh)) {                                     \
			data = (u8 *)data + _size;                             \
			size = size - _size;                                   \
			ret = ((m) || !size) ? 0 : -E2BIG;                     \
		} else {                                                       \
			ret = -ENOSYS;                                         \
		}                                                              \
	}                                                                      \
	(ret == 0);                                                            \
})

#endif
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index ca65df144765..c96db433f8af 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -848,6 +848,7 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
848 if (count) { 848 if (count) {
849 d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); 849 d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
850 ttm->pages[index] = d_page->p; 850 ttm->pages[index] = d_page->p;
851 ttm_dma->cpu_address[index] = d_page->vaddr;
851 ttm_dma->dma_address[index] = d_page->dma; 852 ttm_dma->dma_address[index] = d_page->dma;
852 list_move_tail(&d_page->page_list, &ttm_dma->pages_list); 853 list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
853 r = 0; 854 r = 0;
@@ -979,6 +980,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
979 INIT_LIST_HEAD(&ttm_dma->pages_list); 980 INIT_LIST_HEAD(&ttm_dma->pages_list);
980 for (i = 0; i < ttm->num_pages; i++) { 981 for (i = 0; i < ttm->num_pages; i++) {
981 ttm->pages[i] = NULL; 982 ttm->pages[i] = NULL;
983 ttm_dma->cpu_address[i] = 0;
982 ttm_dma->dma_address[i] = 0; 984 ttm_dma->dma_address[i] = 0;
983 } 985 }
984 986
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 75f319090043..bf080abc86d1 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -55,9 +55,12 @@ static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
55 55
56static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) 56static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
57{ 57{
58 ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*)); 58 ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
59 ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages, 59 sizeof(*ttm->ttm.pages) +
60 sizeof(*ttm->dma_address)); 60 sizeof(*ttm->dma_address) +
61 sizeof(*ttm->cpu_address));
62 ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
63 ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
61} 64}
62 65
63#ifdef CONFIG_X86 66#ifdef CONFIG_X86
@@ -228,7 +231,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
228 231
229 INIT_LIST_HEAD(&ttm_dma->pages_list); 232 INIT_LIST_HEAD(&ttm_dma->pages_list);
230 ttm_dma_tt_alloc_page_directory(ttm_dma); 233 ttm_dma_tt_alloc_page_directory(ttm_dma);
231 if (!ttm->pages || !ttm_dma->dma_address) { 234 if (!ttm->pages) {
232 ttm_tt_destroy(ttm); 235 ttm_tt_destroy(ttm);
233 pr_err("Failed allocating page table\n"); 236 pr_err("Failed allocating page table\n");
234 return -ENOMEM; 237 return -ENOMEM;
@@ -243,7 +246,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
243 246
244 drm_free_large(ttm->pages); 247 drm_free_large(ttm->pages);
245 ttm->pages = NULL; 248 ttm->pages = NULL;
246 drm_free_large(ttm_dma->dma_address); 249 ttm_dma->cpu_address = NULL;
247 ttm_dma->dma_address = NULL; 250 ttm_dma->dma_address = NULL;
248} 251}
249EXPORT_SYMBOL(ttm_dma_tt_fini); 252EXPORT_SYMBOL(ttm_dma_tt_fini);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 202f0a7171e8..1d9f0f1ff52d 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -133,6 +133,7 @@ struct ttm_tt {
133 * struct ttm_dma_tt 133 * struct ttm_dma_tt
134 * 134 *
135 * @ttm: Base ttm_tt struct. 135 * @ttm: Base ttm_tt struct.
136 * @cpu_address: The CPU address of the pages
136 * @dma_address: The DMA (bus) addresses of the pages 137 * @dma_address: The DMA (bus) addresses of the pages
137 * @pages_list: used by some page allocation backend 138 * @pages_list: used by some page allocation backend
138 * 139 *
@@ -142,6 +143,7 @@ struct ttm_tt {
142 */ 143 */
143struct ttm_dma_tt { 144struct ttm_dma_tt {
144 struct ttm_tt ttm; 145 struct ttm_tt ttm;
146 void **cpu_address;
145 dma_addr_t *dma_address; 147 dma_addr_t *dma_address;
146 struct list_head pages_list; 148 struct list_head pages_list;
147}; 149};
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 2a5769fdf8ba..0d7608dc1a34 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -25,6 +25,16 @@
25#ifndef __NOUVEAU_DRM_H__ 25#ifndef __NOUVEAU_DRM_H__
26#define __NOUVEAU_DRM_H__ 26#define __NOUVEAU_DRM_H__
27 27
28#define DRM_NOUVEAU_EVENT_NVIF 0x80000000
29
30/* reserved object handles when using deprecated object APIs - these
31 * are here so that libdrm can allow interoperability with the new
32 * object APIs
33 */
34#define NOUVEAU_ABI16_CLIENT 0xffffffff
35#define NOUVEAU_ABI16_DEVICE 0xdddddddd
36#define NOUVEAU_ABI16_CHAN(n) (0xcccc0000 | (n))
37
28#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0) 38#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
29#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) 39#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
30#define NOUVEAU_GEM_DOMAIN_GART (1 << 2) 40#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
@@ -123,6 +133,7 @@ struct drm_nouveau_gem_cpu_fini {
123#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */ 133#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
124#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */ 134#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
125#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */ 135#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
136#define DRM_NOUVEAU_NVIF 0x07
126#define DRM_NOUVEAU_GEM_NEW 0x40 137#define DRM_NOUVEAU_GEM_NEW 0x40
127#define DRM_NOUVEAU_GEM_PUSHBUF 0x41 138#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
128#define DRM_NOUVEAU_GEM_CPU_PREP 0x42 139#define DRM_NOUVEAU_GEM_CPU_PREP 0x42